From: Tamas K Lengyel <tklengyel@sec.in.tum.de>
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, tim@xen.org, julien.grall@linaro.org,
ian.jackson@eu.citrix.com, stefano.stabellini@citrix.com,
andres@lagarcavilla.org, jbeulich@suse.com,
dgdegra@tycho.nsa.gov, Tamas K Lengyel <tklengyel@sec.in.tum.de>
Subject: [PATCH for-4.5 v6 13/17] xen/arm: Data abort exception (R/W) mem_events.
Date: Mon, 15 Sep 2014 16:02:51 +0200
Message-ID: <1410789775-24197-14-git-send-email-tklengyel@sec.in.tum.de>
In-Reply-To: <1410789775-24197-1-git-send-email-tklengyel@sec.in.tum.de>
This patch enables storing, setting, checking and delivering LPAE R/W
mem_events. As LPAE PTEs lack enough available software-programmable
bits, we store the permissions in a radix tree, keyed by the pfn of the
4k page. Only settings other than p2m_access_rwx are saved in the radix
tree.
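
For illustration, here is a minimal sketch of that storage scheme,
condensing the p2m_mem_access_radix_set logic from the patch below
(the helper name is illustrative; error handling and locking omitted):

    /* Store a non-default access setting for a 4k page, keyed by pfn.
     * p2m_access_rwx is the default and is never stored, so a lookup
     * miss on a mapped page simply means "rwx". */
    static long access_radix_set(struct p2m_domain *p2m,
                                 unsigned long pfn, p2m_access_t a)
    {
        if ( a == p2m_access_rwx )
        {
            /* Back to the default: drop any stale entry. */
            radix_tree_delete(&p2m->mem_access_settings, pfn);
            return 0;
        }
        /* Non-default setting: encode the enum value directly in the slot. */
        return radix_tree_insert(&p2m->mem_access_settings, pfn,
                                 radix_tree_int_to_ptr(a));
    }

Unlike the real helper, this sketch does not handle -EEXIST by replacing
an already-present slot; see p2m_mem_access_radix_set below for the
complete version.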
Signed-off-by: Tamas K Lengyel <tklengyel@sec.in.tum.de>
---
v6: - Add helper function p2m_shatter_page.
    - Only allocate 4k pages when mem_access is in use.
    - If no setting was found in the radix tree but the PTE exists,
      return rwx as the permission.
    - Move the inclusion of various headers into this patch.
    - Make npfec a const.
v5: - Move p2m_set_entry's logic into apply_one_level via
      a new p2m_op, MEMACCESS.
v4: - Add a p2m_mem_access_radix_set function to be called
      when inserting new PTEs and when updating existing entries.
    - Switch p2m_mem_access_check to return bool_t.
    - Use the new struct npfec to pass violation info.
v3: - Add a new function for updating PTEs, p2m_set_entry.
    - Use the new struct npfec to pass violation information.
    - Implement n2rwx, rx2rw and the routines required by listeners.
v2: - Patch has been split to ease the review process.
    - Add definitions of data abort data fault status codes (enum dabt_dfsc)
      and only call p2m_mem_access_check for traps caused by permission
      violations.
    - Only call p2m_write_pte in p2m_lookup if the PTE permission
      actually changed.
    - Properly save settings in the radix tree and pause the VCPU with
      mem_event_vcpu_pause.
---
xen/arch/arm/p2m.c | 494 ++++++++++++++++++++++++++++++++++++----
xen/arch/arm/traps.c | 28 ++-
xen/include/asm-arm/p2m.h | 18 ++
xen/include/asm-arm/processor.h | 30 +++
4 files changed, 520 insertions(+), 50 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 627da2f..ce0982c 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -5,6 +5,9 @@
#include <xen/errno.h>
#include <xen/domain_page.h>
#include <xen/bitops.h>
+#include <xen/mem_event.h>
+#include <xen/mem_access.h>
+#include <public/mem_event.h>
#include <asm/flushtlb.h>
#include <asm/gic.h>
#include <asm/event.h>
@@ -149,6 +152,74 @@ static lpae_t *p2m_map_first(struct p2m_domain *p2m, paddr_t addr)
return __map_domain_page(page);
}
+static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
+{
+ /* First apply type permissions */
+ switch ( t )
+ {
+ case p2m_ram_rw:
+ e->p2m.xn = 0;
+ e->p2m.write = 1;
+ break;
+
+ case p2m_ram_ro:
+ e->p2m.xn = 0;
+ e->p2m.write = 0;
+ break;
+
+ case p2m_iommu_map_rw:
+ case p2m_map_foreign:
+ case p2m_grant_map_rw:
+ case p2m_mmio_direct:
+ e->p2m.xn = 1;
+ e->p2m.write = 1;
+ break;
+
+ case p2m_iommu_map_ro:
+ case p2m_grant_map_ro:
+ case p2m_invalid:
+ e->p2m.xn = 1;
+ e->p2m.write = 0;
+ break;
+
+ case p2m_max_real_type:
+ BUG();
+ break;
+ }
+
+ /* Then restrict with access permissions */
+ switch ( a )
+ {
+ case p2m_access_n:
+ e->p2m.read = e->p2m.write = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_r:
+ e->p2m.write = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_x:
+ e->p2m.write = 0;
+ e->p2m.read = 0;
+ break;
+ case p2m_access_rx:
+ e->p2m.write = 0;
+ break;
+ case p2m_access_w:
+ e->p2m.read = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_rw:
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_wx:
+ e->p2m.read = 0;
+ break;
+ case p2m_access_rwx:
+ break;
+ }
+}
+
/*
* Lookup the MFN corresponding to a domain's PFN.
*
@@ -259,37 +330,7 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
break;
}
- switch (t)
- {
- case p2m_ram_rw:
- e.p2m.xn = 0;
- e.p2m.write = 1;
- break;
-
- case p2m_ram_ro:
- e.p2m.xn = 0;
- e.p2m.write = 0;
- break;
-
- case p2m_iommu_map_rw:
- case p2m_map_foreign:
- case p2m_grant_map_rw:
- case p2m_mmio_direct:
- e.p2m.xn = 1;
- e.p2m.write = 1;
- break;
-
- case p2m_iommu_map_ro:
- case p2m_grant_map_ro:
- case p2m_invalid:
- e.p2m.xn = 1;
- e.p2m.write = 0;
- break;
-
- case p2m_max_real_type:
- BUG();
- break;
- }
+ p2m_set_permission(&e, t, a);
ASSERT(!(pa & ~PAGE_MASK));
ASSERT(!(pa & ~PADDR_MASK));
@@ -381,6 +422,7 @@ enum p2m_operation {
REMOVE,
RELINQUISH,
CACHEFLUSH,
+ MEMACCESS,
};
/* Put any references on the single 4K page referenced by pte. TODO:
@@ -441,6 +483,53 @@ static bool_t is_mapping_aligned(const paddr_t start_gpaddr,
return true;
}
+static long p2m_mem_access_radix_set(struct p2m_domain *p2m, unsigned long pfn,
+ p2m_access_t a)
+{
+ long rc;
+
+ if ( p2m_access_rwx == a )
+ {
+ radix_tree_delete(&p2m->mem_access_settings, pfn);
+ return 0;
+ }
+
+ rc = radix_tree_insert(&p2m->mem_access_settings, pfn,
+ radix_tree_int_to_ptr(a));
+
+ if ( -EEXIST == rc )
+ {
+ /* If a setting existed already, change it to the new one */
+ radix_tree_replace_slot(
+ radix_tree_lookup_slot(
+ &p2m->mem_access_settings, pfn),
+ radix_tree_int_to_ptr(a));
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int p2m_shatter_page(struct domain *d,
+ lpae_t *entry,
+ unsigned int level,
+ paddr_t level_shift,
+ bool_t flush_cache)
+{
+ int rc = p2m_create_table(d, entry,
+ level_shift - PAGE_SHIFT, flush_cache);
+
+ if ( !rc )
+ {
+ struct p2m_domain *p2m = &d->arch.p2m;
+ p2m->stats.shattered[level]++;
+ p2m->stats.mappings[level]--;
+ p2m->stats.mappings[level+1] += LPAE_ENTRIES;
+ }
+
+ return rc;
+}
+
#define P2M_ONE_DESCEND 0
#define P2M_ONE_PROGRESS_NOP 0x1
#define P2M_ONE_PROGRESS 0x10
@@ -494,13 +583,31 @@ static int apply_one_level(struct domain *d,
if ( p2m_valid(orig_pte) )
return P2M_ONE_DESCEND;
- if ( is_mapping_aligned(*addr, end_gpaddr, 0, level_size) )
+ if ( level < 3 && p2m_access_rwx != a )
+ {
+ /* We create only 4k pages when mem_access is in use. */
+ }
+ else if ( is_mapping_aligned(*addr, end_gpaddr, 0, level_size) )
{
struct page_info *page;
page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
if ( page )
{
+ if ( 3 == level )
+ {
+ rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
+ if ( rc < 0 )
+ {
+ free_domheap_page(page);
+ return rc;
+ }
+ }
+ else
+ {
+ a = p2m_access_rwx;
+ }
+
pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
if ( level < 3 )
pte.p2m.table = 0;
@@ -521,8 +628,8 @@ static int apply_one_level(struct domain *d,
/*
* If we get here then we failed to allocate a sufficiently
* large contiguous region for this level (which can't be
- * L3). Create a page table and continue to descend so we try
- * smaller allocations.
+ * L3) or mem_access is in use. Create a page table and
+ * continue to descend so we try smaller allocations.
*/
rc = p2m_create_table(d, entry, 0, flush_cache);
if ( rc < 0 )
@@ -535,6 +642,17 @@ static int apply_one_level(struct domain *d,
/* We do not handle replacing an existing table with a superpage */
(level == 3 || !p2m_table(orig_pte)) )
{
+ if ( 3 == level )
+ {
+ rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
+ if ( rc < 0 )
+ return rc;
+ }
+ else
+ {
+ a = p2m_access_rwx;
+ }
+
/* New mapping is superpage aligned, make it */
pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
if ( level < 3 )
@@ -585,14 +703,10 @@ static int apply_one_level(struct domain *d,
if ( p2m_mapping(orig_pte) )
{
*flush = true;
- rc = p2m_create_table(d, entry,
- level_shift - PAGE_SHIFT, flush_cache);
+ rc = p2m_shatter_page(d, entry, level, level_shift,
+ flush_cache);
if ( rc < 0 )
return rc;
-
- p2m->stats.shattered[level]++;
- p2m->stats.mappings[level]--;
- p2m->stats.mappings[level+1] += LPAE_ENTRIES;
} /* else: an existing table mapping -> descend */
BUG_ON(!p2m_table(*entry));
@@ -627,15 +741,11 @@ static int apply_one_level(struct domain *d,
* and descend.
*/
*flush = true;
- rc = p2m_create_table(d, entry,
- level_shift - PAGE_SHIFT, flush_cache);
+ rc = p2m_shatter_page(d, entry, level, level_shift, flush_cache);
+
if ( rc < 0 )
return rc;
- p2m->stats.shattered[level]++;
- p2m->stats.mappings[level]--;
- p2m->stats.mappings[level+1] += LPAE_ENTRIES;
-
return P2M_ONE_DESCEND;
}
}
@@ -660,6 +770,7 @@ static int apply_one_level(struct domain *d,
memset(&pte, 0x00, sizeof(pte));
p2m_write_pte(entry, pte, flush_cache);
+ radix_tree_delete(&p2m->mem_access_settings, paddr_to_pfn(*addr));
*addr += level_size;
*maddr += level_size;
@@ -704,6 +815,49 @@ static int apply_one_level(struct domain *d,
*addr += PAGE_SIZE;
return P2M_ONE_PROGRESS_NOP;
}
+
+ case MEMACCESS:
+ if ( level < 3 )
+ {
+ if ( !p2m_valid(orig_pte) )
+ {
+ *addr += level_size;
+ return P2M_ONE_PROGRESS_NOP;
+ }
+
+ /* Shatter large pages as we descend */
+ if ( p2m_mapping(orig_pte) )
+ {
+ rc = p2m_shatter_page(d, entry, level, level_shift, flush_cache);
+
+ if ( rc < 0 )
+ return rc;
+ } /* else: an existing table mapping -> descend */
+
+ return P2M_ONE_DESCEND;
+ }
+ else
+ {
+ pte = orig_pte;
+
+ if ( !p2m_table(pte) )
+ pte.bits = 0;
+
+ if ( p2m_valid(pte) )
+ {
+ ASSERT(pte.p2m.type != p2m_invalid);
+
+ rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
+ if ( rc < 0 )
+ return rc;
+
+ p2m_set_permission(&pte, pte.p2m.type, a);
+ p2m_write_pte(entry, pte, flush_cache);
+ }
+
+ *addr += level_size;
+ return P2M_ONE_PROGRESS;
+ }
}
BUG(); /* Should never get here */
@@ -726,7 +880,9 @@ static int apply_p2m_changes(struct domain *d,
unsigned int level = 0;
unsigned long cur_first_page = ~0,
cur_first_offset = ~0,
- cur_second_offset = ~0;
+ cur_second_offset = ~0,
+ start_gpfn = paddr_to_pfn(start_gpaddr),
+ end_gpfn = paddr_to_pfn(end_gpaddr);
unsigned long count = 0;
bool_t flush = false;
bool_t flush_pt;
@@ -761,6 +917,20 @@ static int apply_p2m_changes(struct domain *d,
count = 0;
}
+ /*
+ * Preempt setting mem_access permissions as required by XSA-89,
+ * if it's not the last iteration. */
+ if ( op == MEMACCESS && count )
+ {
+ int progress = paddr_to_pfn(addr) - start_gpfn + 1;
+ if ( (end_gpfn-start_gpfn) > progress && !(progress & mask)
+ && hypercall_preempt_check() )
+ {
+ rc = progress;
+ goto out;
+ }
+ }
+
if ( cur_first_page != p2m_first_level_index(addr) )
{
if ( first ) unmap_domain_page(first);
@@ -1159,6 +1329,236 @@ err:
return page;
}
+bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
+{
+ struct vcpu *v = current;
+ struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
+ mem_event_request_t *req = NULL;
+ xenmem_access_t xma;
+ bool_t violation;
+ int rc;
+
+ rc = p2m_get_mem_access(v->domain, paddr_to_pfn(gpa), &xma);
+ if ( rc )
+ {
+ /* No setting was found, reinject */
+ return 1;
+ }
+ else
+ {
+ /* First, handle rx2rw and n2rwx conversion automatically. */
+ if ( npfec.write_access && xma == XENMEM_access_rx2rw )
+ {
+ rc = p2m_set_mem_access(v->domain, paddr_to_pfn(gpa), 1,
+ 0, ~0, XENMEM_access_rw);
+ return 0;
+ }
+ else if ( xma == XENMEM_access_n2rwx )
+ {
+ rc = p2m_set_mem_access(v->domain, paddr_to_pfn(gpa), 1,
+ 0, ~0, XENMEM_access_rwx);
+ }
+ }
+
+ /* Otherwise, check if there is a memory event listener, and send the message along */
+ if ( !mem_event_check_ring( &v->domain->mem_event->access ) )
+ {
+ /* No listener */
+ if ( p2m->access_required )
+ {
+ gdprintk(XENLOG_INFO, "Memory access permissions failure, "
+ "no mem_event listener VCPU %d, dom %d\n",
+ v->vcpu_id, v->domain->domain_id);
+ domain_crash(v->domain);
+ }
+ else
+ {
+ /* n2rwx was already handled */
+ if ( xma != XENMEM_access_n2rwx )
+ {
+ /* A listener is not required, so clear the access
+ * restrictions. */
+ rc = p2m_set_mem_access(v->domain, paddr_to_pfn(gpa), 1,
+ 0, ~0, XENMEM_access_rwx);
+ }
+ }
+
+ /* No need to reinject */
+ return 0;
+ }
+
+ switch ( xma )
+ {
+ default:
+ case XENMEM_access_n:
+ violation = npfec.read_access || npfec.write_access || npfec.insn_fetch;
+ break;
+ case XENMEM_access_r:
+ violation = npfec.write_access || npfec.insn_fetch;
+ break;
+ case XENMEM_access_w:
+ violation = npfec.read_access || npfec.insn_fetch;
+ break;
+ case XENMEM_access_x:
+ violation = npfec.read_access || npfec.write_access;
+ break;
+ case XENMEM_access_rx:
+ violation = npfec.write_access;
+ break;
+ case XENMEM_access_wx:
+ violation = npfec.read_access;
+ break;
+ case XENMEM_access_rw:
+ violation = npfec.insn_fetch;
+ break;
+ case XENMEM_access_rwx:
+ violation = 0;
+ break;
+ }
+
+ if ( !violation )
+ return 1;
+
+ req = xzalloc(mem_event_request_t);
+ if ( req )
+ {
+ req->reason = MEM_EVENT_REASON_VIOLATION;
+ if ( xma != XENMEM_access_n2rwx )
+ req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+ req->gfn = gpa >> PAGE_SHIFT;
+ req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
+ req->gla = gla;
+ req->gla_valid = npfec.gla_valid;
+ req->access_r = npfec.read_access;
+ req->access_w = npfec.write_access;
+ req->access_x = npfec.insn_fetch;
+ if ( npfec_kind_in_gpt == npfec.kind )
+ req->fault_in_gpt = 1;
+ if ( npfec_kind_with_gla == npfec.kind )
+ req->fault_with_gla = 1;
+ req->vcpu_id = v->vcpu_id;
+
+ mem_access_send_req(v->domain, req);
+ xfree(req);
+ }
+
+ /* Pause the current VCPU */
+ if ( xma != XENMEM_access_n2rwx )
+ mem_event_vcpu_pause(v);
+
+ return 0;
+}
+
+/* Set access type for a region of pfns.
+ * If pfn == -1ul, sets the default access type */
+long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
+ uint32_t start, uint32_t mask, xenmem_access_t access)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ p2m_access_t a;
+ long rc = 0;
+
+ static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+#undef ACCESS
+ };
+
+ switch ( access )
+ {
+ case 0 ... ARRAY_SIZE(memaccess) - 1:
+ a = memaccess[access];
+ break;
+ case XENMEM_access_default:
+ a = p2m->default_access;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* If request to set default access. */
+ if ( pfn == ~0ul )
+ {
+ p2m->default_access = a;
+ return 0;
+ }
+
+ rc = apply_p2m_changes(d, MEMACCESS,
+ pfn_to_paddr(pfn+start), pfn_to_paddr(pfn+nr),
+ 0, MATTR_MEM, mask, 0, a);
+
+ if ( rc < 0 )
+ return rc;
+ else if ( rc > 0 )
+ return start+rc;
+
+ flush_tlb_domain(d);
+ return 0;
+}
+
+int p2m_get_mem_access(struct domain *d, unsigned long gpfn,
+ xenmem_access_t *access)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ void *i;
+ unsigned int index;
+
+ static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = XENMEM_access_##ac
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+#undef ACCESS
+ };
+
+ /* If request to get default access */
+ if ( gpfn == ~0ul )
+ {
+ *access = memaccess[p2m->default_access];
+ return 0;
+ }
+
+ spin_lock(&p2m->lock);
+ i = radix_tree_lookup(&p2m->mem_access_settings, gpfn);
+ spin_unlock(&p2m->lock);
+
+ if ( !i )
+ {
+ /* No setting was found in the Radix tree. Check if the
+ * entry exists in the page-tables. */
+ paddr_t maddr = p2m_lookup(d, gpfn << PAGE_SHIFT, NULL);
+ if ( INVALID_PADDR == maddr )
+ {
+ return -ESRCH;
+ }
+
+ /* If the entry exists then it's rwx. */
+ *access = XENMEM_access_rwx;
+ }
+ else
+ {
+ index = radix_tree_ptr_to_int(i);
+
+ if ( index >= ARRAY_SIZE(memaccess) )
+ return -ERANGE;
+
+ *access = memaccess[index];
+ }
+ return 0;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 019991f..5bfcdf3 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1852,11 +1852,33 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
info.gva = READ_SYSREG64(FAR_EL2);
#endif
- if (dabt.s1ptw)
+ rc = gva_to_ipa(info.gva, &info.gpa);
+ if ( -EFAULT == rc )
goto bad_data_abort;
- rc = gva_to_ipa(info.gva, &info.gpa);
- if ( rc == -EFAULT )
+ switch ( dabt.dfsc )
+ {
+ case DABT_DFSC_PERMISSION_1:
+ case DABT_DFSC_PERMISSION_2:
+ case DABT_DFSC_PERMISSION_3:
+ {
+ const struct npfec npfec = {
+ .read_access = 1,
+ .write_access = dabt.write,
+ .gla_valid = 1,
+ .kind = dabt.s1ptw ? npfec_kind_in_gpt : npfec_kind_with_gla
+ };
+
+ rc = p2m_mem_access_check(info.gpa, info.gva, npfec);
+
+ /* Trap was triggered by mem_access, work here is done */
+ if ( !rc )
+ return;
+ }
+ break;
+ }
+
+ if ( dabt.s1ptw )
goto bad_data_abort;
/* XXX: Decode the instruction if ISS is not valid */
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 89e653c..54a7950 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -3,6 +3,7 @@
#include <xen/mm.h>
#include <xen/radix-tree.h>
+#include <public/memory.h>
#include <xen/p2m-common.h>
@@ -248,6 +249,23 @@ static inline bool_t p2m_mem_event_sanity_check(struct domain *d)
return 1;
}
+/* Send mem event based on the access (gla is -1ull if not available). Boolean
+ * return value indicates whether the trap needs to be injected into the guest. */
+bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec);
+
+/* Resumes the running of the VCPU, restarting the last instruction */
+void p2m_mem_access_resume(struct domain *d);
+
+/* Set access type for a region of pfns.
+ * If start_pfn == -1ul, sets the default access type */
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
+ uint32_t start, uint32_t mask, xenmem_access_t access);
+
+/* Get access type for a pfn
+ * If pfn == -1ul, gets the default access type */
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+ xenmem_access_t *access);
+
#endif /* _XEN_P2M_H */
/*
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 0cc5b6d..b844f1d 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -262,6 +262,36 @@ enum dabt_size {
DABT_DOUBLE_WORD = 3,
};
+/* Data abort data fault status codes (DFSC) */
+enum dabt_dfsc {
+ DABT_DFSC_ADDR_SIZE_0 = 0b000000,
+ DABT_DFSC_ADDR_SIZE_1 = 0b000001,
+ DABT_DFSC_ADDR_SIZE_2 = 0b000010,
+ DABT_DFSC_ADDR_SIZE_3 = 0b000011,
+ DABT_DFSC_TRANSLATION_0 = 0b000100,
+ DABT_DFSC_TRANSLATION_1 = 0b000101,
+ DABT_DFSC_TRANSLATION_2 = 0b000110,
+ DABT_DFSC_TRANSLATION_3 = 0b000111,
+ DABT_DFSC_ACCESS_1 = 0b001001,
+ DABT_DFSC_ACCESS_2 = 0b001010,
+ DABT_DFSC_ACCESS_3 = 0b001011,
+ DABT_DFSC_PERMISSION_1 = 0b001101,
+ DABT_DFSC_PERMISSION_2 = 0b001110,
+ DABT_DFSC_PERMISSION_3 = 0b001111,
+ DABT_DFSC_SYNC_EXT = 0b010000,
+ DABT_DFSC_SYNC_PARITY = 0b011000,
+ DABT_DFSC_SYNC_EXT_TTW_0 = 0b010100,
+ DABT_DFSC_SYNC_EXT_TTW_1 = 0b010101,
+ DABT_DFSC_SYNC_EXT_TTW_2 = 0b010110,
+ DABT_DFSC_SYNC_EXT_TTW_3 = 0b010111,
+ DABT_DFSC_SYNC_PARITY_TTW_0 = 0b011100,
+ DABT_DFSC_SYNC_PARITY_TTW_1 = 0b011101,
+ DABT_DFSC_SYNC_PARITY_TTW_2 = 0b011110,
+ DABT_DFSC_SYNC_PARITY_TTW_3 = 0b011111,
+ DABT_DFSC_ALIGNMENT = 0b100001,
+ DABT_DFSC_TLB_CONFLICT = 0b110000,
+};
+
union hsr {
uint32_t bits;
struct {
--
2.1.0