From: Tamas K Lengyel <tklengyel@sec.in.tum.de>
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, tim@xen.org, julien.grall@linaro.org,
ian.jackson@eu.citrix.com, stefano.stabellini@citrix.com,
andres@lagarcavilla.org, jbeulich@suse.com,
dgdegra@tycho.nsa.gov, Tamas K Lengyel <tklengyel@sec.in.tum.de>
Subject: [PATCH v5 12/17] xen/arm: Data abort exception (R/W) mem_events.
Date: Wed, 10 Sep 2014 15:28:41 +0200 [thread overview]
Message-ID: <1410355726-5599-13-git-send-email-tklengyel@sec.in.tum.de> (raw)
In-Reply-To: <1410355726-5599-1-git-send-email-tklengyel@sec.in.tum.de>
This patch makes it possible to store, set, check and deliver LPAE R/W mem_events.
As the LPAE PTEs lack enough available software-programmable bits,
we store the permissions in a Radix tree, where the key is the pfn
of a 4k page. Only settings other than p2m_access_rwx are saved
in the Radix tree.
Signed-off-by: Tamas K Lengyel <tklengyel@sec.in.tum.de>
---
v5: - Move p2m_set_entry's logic into apply_one_level via
a new p2m_op, MEMACCESS.
v4: - Add p2m_mem_access_radix_set function to be called
when inserting new PTEs and when updating existing entries.
- Switch p2m_mem_access_check to return bool_t.
- Use new struct npfec to pass violation info.
v3: - Add new function for updating the PTE entries, p2m_set_entry.
- Use the new struct npfec to pass violation information.
- Implement n2rwx, rx2rw and listener required routines.
v2: - Patch been split to ease the review process.
- Add definitions of data abort data fetch status codes (enum dabt_dfsc)
and only call p2m_mem_access_check for traps caused by permission violations.
- Only call p2m_write_pte in p2m_lookup if the PTE permission actually changed.
- Properly save settings in the Radix tree and pause the VCPU with
mem_event_vcpu_pause.
---
xen/arch/arm/p2m.c | 412 +++++++++++++++++++++++++++++++++++++---
xen/arch/arm/traps.c | 31 ++-
xen/include/asm-arm/p2m.h | 17 ++
xen/include/asm-arm/processor.h | 30 +++
4 files changed, 456 insertions(+), 34 deletions(-)
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index e6b4bb6..e089d52 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -152,6 +152,74 @@ static lpae_t *p2m_map_first(struct p2m_domain *p2m, paddr_t addr)
return __map_domain_page(page);
}
+static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
+{
+ /* First apply type permissions */
+ switch ( t )
+ {
+ case p2m_ram_rw:
+ e->p2m.xn = 0;
+ e->p2m.write = 1;
+ break;
+
+ case p2m_ram_ro:
+ e->p2m.xn = 0;
+ e->p2m.write = 0;
+ break;
+
+ case p2m_iommu_map_rw:
+ case p2m_map_foreign:
+ case p2m_grant_map_rw:
+ case p2m_mmio_direct:
+ e->p2m.xn = 1;
+ e->p2m.write = 1;
+ break;
+
+ case p2m_iommu_map_ro:
+ case p2m_grant_map_ro:
+ case p2m_invalid:
+ e->p2m.xn = 1;
+ e->p2m.write = 0;
+ break;
+
+ case p2m_max_real_type:
+ BUG();
+ break;
+ }
+
+ /* Then restrict with access permissions */
+ switch ( a )
+ {
+ case p2m_access_n:
+ e->p2m.read = e->p2m.write = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_r:
+ e->p2m.write = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_x:
+ e->p2m.write = 0;
+ e->p2m.read = 0;
+ break;
+ case p2m_access_rx:
+ e->p2m.write = 0;
+ break;
+ case p2m_access_w:
+ e->p2m.read = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_rw:
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_wx:
+ e->p2m.read = 0;
+ break;
+ case p2m_access_rwx:
+ break;
+ }
+}
+
/*
* Lookup the MFN corresponding to a domain's PFN.
*
@@ -262,37 +330,7 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
break;
}
- switch (t)
- {
- case p2m_ram_rw:
- e.p2m.xn = 0;
- e.p2m.write = 1;
- break;
-
- case p2m_ram_ro:
- e.p2m.xn = 0;
- e.p2m.write = 0;
- break;
-
- case p2m_iommu_map_rw:
- case p2m_map_foreign:
- case p2m_grant_map_rw:
- case p2m_mmio_direct:
- e.p2m.xn = 1;
- e.p2m.write = 1;
- break;
-
- case p2m_iommu_map_ro:
- case p2m_grant_map_ro:
- case p2m_invalid:
- e.p2m.xn = 1;
- e.p2m.write = 0;
- break;
-
- case p2m_max_real_type:
- BUG();
- break;
- }
+ p2m_set_permission(&e, t, a);
ASSERT(!(pa & ~PAGE_MASK));
ASSERT(!(pa & ~PADDR_MASK));
@@ -384,6 +422,7 @@ enum p2m_operation {
REMOVE,
RELINQUISH,
CACHEFLUSH,
+ MEMACCESS,
};
/* Put any references on the single 4K page referenced by pte. TODO:
@@ -444,6 +483,33 @@ static bool_t is_mapping_aligned(const paddr_t start_gpaddr,
return true;
}
+static long p2m_mem_access_radix_set(struct p2m_domain *p2m, unsigned long pfn,
+ p2m_access_t a)
+{
+ long rc;
+
+ if ( p2m_access_rwx == a )
+ {
+ radix_tree_delete(&p2m->mem_access_settings, pfn);
+ return 0;
+ }
+
+ rc = radix_tree_insert(&p2m->mem_access_settings, pfn,
+ radix_tree_int_to_ptr(a));
+
+ if ( -EEXIST == rc )
+ {
+ /* If a setting existed already, change it to the new one */
+ radix_tree_replace_slot(
+ radix_tree_lookup_slot(
+ &p2m->mem_access_settings, pfn),
+ radix_tree_int_to_ptr(a));
+ rc = 0;
+ }
+
+ return rc;
+}
+
#define P2M_ONE_DESCEND 0
#define P2M_ONE_PROGRESS_NOP 0x1
#define P2M_ONE_PROGRESS 0x10
@@ -504,6 +570,10 @@ static int apply_one_level(struct domain *d,
page = alloc_domheap_pages(d, level_shift - PAGE_SHIFT, 0);
if ( page )
{
+ rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
+ if ( rc < 0 )
+ return rc;
+
pte = mfn_to_p2m_entry(page_to_mfn(page), mattr, t, a);
if ( level < 3 )
pte.p2m.table = 0;
@@ -538,6 +608,10 @@ static int apply_one_level(struct domain *d,
/* We do not handle replacing an existing table with a superpage */
(level == 3 || !p2m_table(orig_pte)) )
{
+ rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
+ if ( rc < 0 )
+ return rc;
+
/* New mapping is superpage aligned, make it */
pte = mfn_to_p2m_entry(*maddr >> PAGE_SHIFT, mattr, t, a);
if ( level < 3 )
@@ -663,6 +737,7 @@ static int apply_one_level(struct domain *d,
memset(&pte, 0x00, sizeof(pte));
p2m_write_pte(entry, pte, flush_cache);
+ radix_tree_delete(&p2m->mem_access_settings, paddr_to_pfn(*addr));
*addr += level_size;
*maddr += level_size;
@@ -707,6 +782,53 @@ static int apply_one_level(struct domain *d,
*addr += PAGE_SIZE;
return P2M_ONE_PROGRESS_NOP;
}
+
+ case MEMACCESS:
+ if ( level < 3 )
+ {
+ if ( !p2m_valid(orig_pte) )
+ {
+ (*addr)++;
+ return P2M_ONE_PROGRESS_NOP;
+ }
+
+ /* Shatter large pages as we descend */
+ if ( p2m_mapping(orig_pte) )
+ {
+ rc = p2m_create_table(d, entry,
+ level_shift - PAGE_SHIFT, flush_cache);
+ if ( rc < 0 )
+ return rc;
+
+ p2m->stats.shattered[level]++;
+ p2m->stats.mappings[level]--;
+ p2m->stats.mappings[level+1] += LPAE_ENTRIES;
+ } /* else: an existing table mapping -> descend */
+
+ return P2M_ONE_DESCEND;
+ }
+ else
+ {
+ pte = orig_pte;
+
+ if ( !p2m_table(pte) )
+ pte.bits = 0;
+
+ if ( p2m_valid(pte) )
+ {
+ ASSERT(pte.p2m.type != p2m_invalid);
+
+ rc = p2m_mem_access_radix_set(p2m, paddr_to_pfn(*addr), a);
+ if ( rc < 0 )
+ return rc;
+
+ p2m_set_permission(&pte, pte.p2m.type, a);
+ p2m_write_pte(entry, pte, flush_cache);
+ }
+
+ (*addr)++;
+ return P2M_ONE_PROGRESS;
+ }
}
BUG(); /* Should never get here */
@@ -1135,6 +1257,234 @@ err:
return page;
}
+bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, struct npfec npfec)
+{
+ struct vcpu *v = current;
+ struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
+ mem_event_request_t *req = NULL;
+ xenmem_access_t xma;
+ bool_t violation;
+ int rc;
+
+ rc = p2m_get_mem_access(v->domain, paddr_to_pfn(gpa), &xma);
+ if ( rc )
+ {
+ /* No setting was found, reinject */
+ return 1;
+ }
+ else
+ {
+ /* First, handle rx2rw and n2rwx conversion automatically. */
+ if ( npfec.write_access && xma == XENMEM_access_rx2rw )
+ {
+ rc = p2m_set_mem_access(v->domain, paddr_to_pfn(gpa), 1,
+ 0, ~0, XENMEM_access_rw);
+ return 0;
+ }
+ else if ( xma == XENMEM_access_n2rwx )
+ {
+ rc = p2m_set_mem_access(v->domain, paddr_to_pfn(gpa), 1,
+ 0, ~0, XENMEM_access_rwx);
+ }
+ }
+
+ /* Otherwise, check if there is a memory event listener, and send the message along */
+ if ( !mem_event_check_ring( &v->domain->mem_event->access ) )
+ {
+ /* No listener */
+ if ( p2m->access_required )
+ {
+ gdprintk(XENLOG_INFO, "Memory access permissions failure, "
+ "no mem_event listener VCPU %d, dom %d\n",
+ v->vcpu_id, v->domain->domain_id);
+ domain_crash(v->domain);
+ }
+ else
+ {
+ /* n2rwx was already handled */
+ if ( xma != XENMEM_access_n2rwx)
+ {
+ /* A listener is not required, so clear the access
+ * restrictions. */
+ rc = p2m_set_mem_access(v->domain, paddr_to_pfn(gpa), 1,
+ 0, ~0, XENMEM_access_rwx);
+ }
+ }
+
+ /* No need to reinject */
+ return 0;
+ }
+
+ switch ( xma )
+ {
+ default:
+ case XENMEM_access_n:
+ violation = npfec.read_access || npfec.write_access || npfec.insn_fetch;
+ break;
+ case XENMEM_access_r:
+ violation = npfec.write_access || npfec.insn_fetch;
+ break;
+ case XENMEM_access_w:
+ violation = npfec.read_access || npfec.insn_fetch;
+ break;
+ case XENMEM_access_x:
+ violation = npfec.read_access || npfec.write_access;
+ break;
+ case XENMEM_access_rx:
+ violation = npfec.write_access;
+ break;
+ case XENMEM_access_wx:
+ violation = npfec.read_access;
+ break;
+ case XENMEM_access_rw:
+ violation = npfec.insn_fetch;
+ break;
+ case XENMEM_access_rwx:
+ violation = 0;
+ break;
+ }
+
+ if ( !violation )
+ return 1;
+
+ req = xzalloc(mem_event_request_t);
+ if ( req )
+ {
+ req->reason = MEM_EVENT_REASON_VIOLATION;
+ if ( xma != XENMEM_access_n2rwx )
+ req->flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
+ req->gfn = gpa >> PAGE_SHIFT;
+ req->offset = gpa & ((1 << PAGE_SHIFT) - 1);
+ req->gla = gla;
+ req->gla_valid = npfec.gla_valid;
+ req->access_r = npfec.read_access;
+ req->access_w = npfec.write_access;
+ req->access_x = npfec.insn_fetch;
+ if ( npfec_kind_in_gpt == npfec.kind )
+ req->fault_in_gpt = 1;
+ if ( npfec_kind_with_gla == npfec.kind )
+ req->fault_with_gla = 1;
+ req->vcpu_id = v->vcpu_id;
+
+ mem_access_send_req(v->domain, req);
+ xfree(req);
+ }
+
+ /* Pause the current VCPU */
+ if ( xma != XENMEM_access_n2rwx )
+ mem_event_vcpu_pause(v);
+
+ return 0;
+}
+
+/* Set access type for a region of pfns.
+ * If start_pfn == -1ul, sets the default access type */
+long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
+ uint32_t start, uint32_t mask, xenmem_access_t access)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ p2m_access_t a;
+ long rc = 0;
+ paddr_t paddr;
+
+ static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+#undef ACCESS
+ };
+
+ switch ( access )
+ {
+ case 0 ... ARRAY_SIZE(memaccess) - 1:
+ a = memaccess[access];
+ break;
+ case XENMEM_access_default:
+ a = p2m->default_access;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* If request to set default access */
+ if ( pfn == ~0ul )
+ {
+ p2m->default_access = a;
+ return 0;
+ }
+
+ for ( pfn += start; nr > start; ++pfn )
+ {
+ paddr = pfn_to_paddr(pfn);
+ rc = apply_p2m_changes(d, MEMACCESS, paddr, paddr+1, 0, MATTR_MEM, 0, a);
+ if ( rc < 0 )
+ break;
+
+ /* Check for continuation if it's not the last iteration. */
+ if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+ {
+ rc = start;
+ break;
+ }
+ }
+
+ /* Flush the TLB of the domain to ensure consistency */
+ flush_tlb_domain(d);
+
+ return rc;
+}
+
+int p2m_get_mem_access(struct domain *d, unsigned long gpfn,
+ xenmem_access_t *access)
+{
+ struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ void *i;
+ unsigned int index;
+
+ static const xenmem_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = XENMEM_access_##ac
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+#undef ACCESS
+ };
+
+ /* If request to get default access */
+ if ( gpfn == ~0ull )
+ {
+ *access = memaccess[p2m->default_access];
+ return 0;
+ }
+
+ spin_lock(&p2m->lock);
+
+ i = radix_tree_lookup(&p2m->mem_access_settings, gpfn);
+
+ spin_unlock(&p2m->lock);
+
+ if ( !i )
+ return -ESRCH;
+
+ index = radix_tree_ptr_to_int(i);
+
+ if ( index >= ARRAY_SIZE(memaccess) )
+ return -ERANGE;
+
+ *access = memaccess[index];
+ return 0;
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 019991f..9d93ed8 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -1852,11 +1852,36 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
info.gva = READ_SYSREG64(FAR_EL2);
#endif
- if (dabt.s1ptw)
+ rc = gva_to_ipa(info.gva, &info.gpa);
+ if ( -EFAULT == rc )
goto bad_data_abort;
- rc = gva_to_ipa(info.gva, &info.gpa);
- if ( rc == -EFAULT )
+ switch ( dabt.dfsc )
+ {
+ case DABT_DFSC_PERMISSION_1:
+ case DABT_DFSC_PERMISSION_2:
+ case DABT_DFSC_PERMISSION_3:
+ {
+ struct npfec npfec = {
+ .read_access = 1,
+ .write_access = dabt.write,
+ .gla_valid = 1,
+ .kind = dabt.s1ptw ? npfec_kind_in_gpt : npfec_kind_with_gla
+ };
+
+ rc = p2m_mem_access_check(info.gpa, info.gva, npfec);
+
+ /* Trap was triggered by mem_access, work here is done */
+ if ( !rc )
+ return;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if ( dabt.s1ptw )
goto bad_data_abort;
/* XXX: Decode the instruction if ISS is not valid */
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index b4ca86d..85ce984 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -246,6 +246,23 @@ static inline bool_t p2m_mem_event_sanity_check(struct domain *d)
return 1;
}
+/* Send mem event based on the access (gla is -1ull if not available). Boolean
+ * return value indicates if trap needs to be injected into guest. */
+bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, struct npfec npfec);
+
+/* Resumes the running of the VCPU, restarting the last instruction */
+void p2m_mem_access_resume(struct domain *d);
+
+/* Set access type for a region of pfns.
+ * If start_pfn == -1ul, sets the default access type */
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
+ uint32_t start, uint32_t mask, xenmem_access_t access);
+
+/* Get access type for a pfn
+ * If pfn == -1ul, gets the default access type */
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+ xenmem_access_t *access);
+
#endif /* _XEN_P2M_H */
/*
diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h
index 0cc5b6d..b844f1d 100644
--- a/xen/include/asm-arm/processor.h
+++ b/xen/include/asm-arm/processor.h
@@ -262,6 +262,36 @@ enum dabt_size {
DABT_DOUBLE_WORD = 3,
};
+/* Data abort data fetch status codes */
+enum dabt_dfsc {
+ DABT_DFSC_ADDR_SIZE_0 = 0b000000,
+ DABT_DFSC_ADDR_SIZE_1 = 0b000001,
+ DABT_DFSC_ADDR_SIZE_2 = 0b000010,
+ DABT_DFSC_ADDR_SIZE_3 = 0b000011,
+ DABT_DFSC_TRANSLATION_0 = 0b000100,
+ DABT_DFSC_TRANSLATION_1 = 0b000101,
+ DABT_DFSC_TRANSLATION_2 = 0b000110,
+ DABT_DFSC_TRANSLATION_3 = 0b000111,
+ DABT_DFSC_ACCESS_1 = 0b001001,
+ DABT_DFSC_ACCESS_2 = 0b001010,
+ DABT_DFSC_ACCESS_3 = 0b001011,
+ DABT_DFSC_PERMISSION_1 = 0b001101,
+ DABT_DFSC_PERMISSION_2 = 0b001110,
+ DABT_DFSC_PERMISSION_3 = 0b001111,
+ DABT_DFSC_SYNC_EXT = 0b010000,
+ DABT_DFSC_SYNC_PARITY = 0b011000,
+ DABT_DFSC_SYNC_EXT_TTW_0 = 0b010100,
+ DABT_DFSC_SYNC_EXT_TTW_1 = 0b010101,
+ DABT_DFSC_SYNC_EXT_TTW_2 = 0b010110,
+ DABT_DFSC_SYNC_EXT_TTW_3 = 0b010111,
+ DABT_DFSC_SYNC_PARITY_TTW_0 = 0b011100,
+ DABT_DFSC_SYNC_PARITY_TTW_1 = 0b011101,
+ DABT_DFSC_SYNC_PARITY_TTW_2 = 0b011110,
+ DABT_DFSC_SYNC_PARITY_TTW_3 = 0b011111,
+ DABT_DFSC_ALIGNMENT = 0b100001,
+ DABT_DFSC_TLB_CONFLICT = 0b110000,
+};
+
union hsr {
uint32_t bits;
struct {
--
2.1.0
next prev parent reply other threads:[~2014-09-10 13:28 UTC|newest]
Thread overview: 48+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-09-10 13:28 [PATCH v5 00/17] Mem_event and mem_access for ARM Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 01/17] xen: Relocate mem_access and mem_event into common Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 02/17] xen: Relocate p2m_mem_access_resume to mem_access common Tamas K Lengyel
2014-09-11 20:16 ` Julien Grall
2014-09-12 8:56 ` Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 03/17] xen: Relocate struct npfec definition into common Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 04/17] xen: Relocate mem_event_op domctl and access_op memop " Tamas K Lengyel
2014-09-10 13:44 ` Jan Beulich
2014-09-10 13:28 ` [PATCH v5 05/17] xen/mem_event: Clean out superfluous white-spaces Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 06/17] xen/mem_event: Relax error condition on debug builds Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 07/17] xen/mem_event: Abstract architecture specific sanity checks Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 08/17] xen/mem_access: Abstract architecture specific sanity check Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 09/17] xen/arm: p2m type definitions and changes Tamas K Lengyel
2014-09-11 20:25 ` Julien Grall
2014-09-12 8:15 ` Tamas K Lengyel
2014-09-12 19:23 ` Julien Grall
2014-09-12 20:25 ` Tamas K Lengyel
2014-09-11 20:49 ` Julien Grall
2014-09-12 8:31 ` Tamas K Lengyel
2014-09-12 19:41 ` Julien Grall
2014-09-12 20:20 ` Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 10/17] xen/arm: Add set access required domctl Tamas K Lengyel
2014-09-11 20:26 ` Julien Grall
2014-09-10 13:28 ` [PATCH v5 11/17] xen/arm: Implement domain_get_maximum_gpfn Tamas K Lengyel
2014-09-11 20:28 ` Julien Grall
2014-09-12 8:58 ` Tamas K Lengyel
2014-09-10 13:28 ` Tamas K Lengyel [this message]
2014-09-11 21:19 ` [PATCH v5 12/17] xen/arm: Data abort exception (R/W) mem_events Julien Grall
2014-09-12 8:46 ` Tamas K Lengyel
2014-09-12 20:35 ` Julien Grall
2014-09-12 20:48 ` Tamas K Lengyel
2014-09-12 21:04 ` Julien Grall
2014-09-10 13:28 ` [PATCH v5 13/17] xen/arm: Instruction prefetch abort (X) mem_event handling Tamas K Lengyel
2014-09-11 21:23 ` Julien Grall
2014-09-12 8:34 ` Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 14/17] xen/arm: Enable the compilation of mem_access and mem_event on ARM Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 15/17] xen: Extend getdomaininfo to return the domain's max_gpfn Tamas K Lengyel
2014-09-10 13:48 ` Jan Beulich
2014-09-10 13:55 ` Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 16/17] tools/libxc: Allocate magic page for mem access on ARM Tamas K Lengyel
2014-09-10 13:28 ` [PATCH v5 17/17] tools/tests: Enable xen-access " Tamas K Lengyel
2014-09-11 21:29 ` Julien Grall
2014-09-12 8:50 ` Tamas K Lengyel
2014-09-12 9:01 ` Tamas K Lengyel
2014-09-10 13:51 ` [PATCH v5 00/17] Mem_event and mem_access for ARM Jan Beulich
2014-09-10 14:01 ` Tamas K Lengyel
2014-09-15 22:26 ` Ian Campbell
2014-09-16 8:00 ` Jan Beulich
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1410355726-5599-13-git-send-email-tklengyel@sec.in.tum.de \
--to=tklengyel@sec.in.tum.de \
--cc=andres@lagarcavilla.org \
--cc=dgdegra@tycho.nsa.gov \
--cc=ian.campbell@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=julien.grall@linaro.org \
--cc=stefano.stabellini@citrix.com \
--cc=tim@xen.org \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).