From: Andres Lagar Cavilla <andres@lagarcavilla.org>
To: Tamas K Lengyel <tamas.lengyel@zentific.com>
Cc: keir@xen.org, Ian Campbell <ian.campbell@citrix.com>,
Tim Deegan <tim@xen.org>, Ian Jackson <ian.jackson@eu.citrix.com>,
Xen-devel <xen-devel@lists.xen.org>,
stefano.stabellini@citrix.com, Jan Beulich <jbeulich@suse.com>,
dgdegra@tycho.nsa.gov
Subject: Re: [PATCH RFC 3/7] xen/arm: Enable the compilation of mem_access and mem_event on ARM.
Date: Mon, 25 Aug 2014 10:25:40 -0700
Message-ID: <CADzFZPuY_ifdNe+=N6sBcNxuUNfPEPT2TKvk_mcfY9AEVPLdpw@mail.gmail.com>
In-Reply-To: <1408699832-13325-4-git-send-email-tamas.lengyel@zentific.com>
On Fri, Aug 22, 2014 at 2:30 AM, Tamas K Lengyel <tamas.lengyel@zentific.com> wrote:
> This patch sets up the infrastructure to support mem_access and mem_event
> on ARM and turns on compilation. We define the required XSM functions,
> handling of domctl copyback, and the required p2m types and stub-functions
> in this patch.
>
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
>
Non-ARM bits LGTM. What I see here is the removal of the CONFIG_X86 guards from
the common code. If Xen were ever to support another architecture (hello, IA64),
it might be more reasonable to keep a combined guard such as
#if defined(CONFIG_X86) || defined(CONFIG_ARM), so that a new port still gets the
ENOSYS stubs. I don't know how likely that future direction is.
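For instance, in xen/include/xen/mem_access.h something along these lines (just a
sketch, reusing the declarations and stubs this patch removes, with the guard widened
to cover both architectures that implement mem_access):

#if defined(CONFIG_X86) || defined(CONFIG_ARM)

/* Real declarations, backed by xen/common/mem_access.c with this series. */
int mem_access_memop(unsigned long cmd,
                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
int mem_access_send_req(struct domain *d, mem_event_request_t *req);

#else

/* Stubs, so that an architecture without mem_access support still builds. */
static inline
int mem_access_memop(unsigned long cmd,
                     XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
{
    return -ENOSYS;
}

static inline
int mem_access_send_req(struct domain *d, mem_event_request_t *req)
{
    return -ENOSYS;
}

#endif /* CONFIG_X86 || CONFIG_ARM */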
Andres
> ---
> xen/arch/arm/domctl.c | 36 ++++++++++++++--
> xen/arch/arm/mm.c | 18 ++++++--
> xen/arch/arm/p2m.c | 5 +++
> xen/common/mem_access.c | 6 +--
> xen/common/mem_event.c | 15 +++++--
> xen/include/asm-arm/p2m.h | 100 ++++++++++++++++++++++++++++++++++---------
> xen/include/xen/mem_access.h | 19 --------
> xen/include/xen/mem_event.h | 53 +++--------------------
> xen/include/xen/sched.h | 1 -
> xen/include/xsm/dummy.h | 24 +++++------
> xen/include/xsm/xsm.h | 25 +++++------
> xen/xsm/dummy.c | 4 +-
> 12 files changed, 178 insertions(+), 128 deletions(-)
>
> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
> index 45974e7..bb0b8d3 100644
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -11,10 +11,17 @@
> #include <xen/sched.h>
> #include <xen/hypercall.h>
> #include <public/domctl.h>
> +#include <asm/guest_access.h>
> +#include <xen/mem_event.h>
> +#include <public/mem_event.h>
>
> long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
> XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> {
> +
> + long ret;
> + bool_t copyback = 0;
> +
> switch ( domctl->cmd )
> {
> case XEN_DOMCTL_cacheflush:
> @@ -23,17 +30,38 @@ long arch_do_domctl(struct xen_domctl *domctl, struct
> domain *d,
> unsigned long e = s + domctl->u.cacheflush.nr_pfns;
>
> if ( domctl->u.cacheflush.nr_pfns > (1U<<MAX_ORDER) )
> - return -EINVAL;
> + {
> + ret = -EINVAL;
> + break;
> + }
>
> if ( e < s )
> - return -EINVAL;
> + {
> + ret = -EINVAL;
> + break;
> + }
>
> - return p2m_cache_flush(d, s, e);
> + ret = p2m_cache_flush(d, s, e);
> }
> + break;
> +
> + case XEN_DOMCTL_mem_event_op:
> + {
> + ret = mem_event_domctl(d, &domctl->u.mem_event_op,
> + guest_handle_cast(u_domctl, void));
> + copyback = 1;
> + }
> + break;
>
> default:
> - return subarch_do_domctl(domctl, d, u_domctl);
> + ret = subarch_do_domctl(domctl, d, u_domctl);
> + break;
> }
> +
> + if ( copyback && __copy_to_guest(u_domctl, domctl, 1) )
> + ret = -EFAULT;
> +
> + return ret;
> }
>
> void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 0a243b0..cd04dec 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -35,6 +35,9 @@
> #include <asm/current.h>
> #include <asm/flushtlb.h>
> #include <public/memory.h>
> +#include <xen/mem_event.h>
> +#include <xen/mem_access.h>
> +#include <xen/hypercall.h>
> #include <xen/sched.h>
> #include <xen/vmap.h>
> #include <xsm/xsm.h>
> @@ -1111,18 +1114,27 @@ int xenmem_add_to_physmap_one(
>
> long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
> {
> - switch ( op )
> +
> + long rc;
> +
> + switch ( op & MEMOP_CMD_MASK )
> {
> /* XXX: memsharing not working yet */
> case XENMEM_get_sharing_shared_pages:
> case XENMEM_get_sharing_freed_pages:
> return 0;
> + case XENMEM_access_op:
> + {
> + rc = mem_access_memop(op, guest_handle_cast(arg, xen_mem_access_op_t));
> + break;
> + }
>
> default:
> - return -ENOSYS;
> + rc = -ENOSYS;
> + break;
> }
>
> - return 0;
> + return rc;
> }
>
> struct domain *page_get_owner_and_reference(struct page_info *page)
> diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
> index 143199b..0ca0d2f 100644
> --- a/xen/arch/arm/p2m.c
> +++ b/xen/arch/arm/p2m.c
> @@ -10,6 +10,9 @@
> #include <asm/event.h>
> #include <asm/hardirq.h>
> #include <asm/page.h>
> +#include <xen/mem_event.h>
> +#include <public/mem_event.h>
> +#include <xen/mem_access.h>
>
> /* First level P2M is 2 consecutive pages */
> #define P2M_FIRST_ORDER 1
> @@ -999,6 +1002,8 @@ int p2m_init(struct domain *d)
> p2m->max_mapped_gfn = 0;
> p2m->lowest_mapped_gfn = ULONG_MAX;
>
> + p2m->default_access = p2m_access_rwx;
> +
> err:
> spin_unlock(&p2m->lock);
>
> diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
> index 84acdf9..6bb9cf4 100644
> --- a/xen/common/mem_access.c
> +++ b/xen/common/mem_access.c
> @@ -29,8 +29,6 @@
> #include <xen/mem_event.h>
> #include <xsm/xsm.h>
>
> -#ifdef CONFIG_X86
> -
> int mem_access_memop(unsigned long cmd,
> XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
> {
> @@ -45,9 +43,11 @@ int mem_access_memop(unsigned long cmd,
> if ( rc )
> return rc;
>
> +#ifdef CONFIG_X86
> rc = -EINVAL;
> if ( !is_hvm_domain(d) )
> goto out;
> +#endif
>
> rc = xsm_mem_event_op(XSM_DM_PRIV, d, XENMEM_access_op);
> if ( rc )
> @@ -125,8 +125,6 @@ int mem_access_send_req(struct domain *d,
> mem_event_request_t *req)
> return 0;
> }
>
> -#endif /* CONFIG_X86 */
> -
> /*
> * Local variables:
> * mode: C
> diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
> index a94ddf6..2a91928 100644
> --- a/xen/common/mem_event.c
> +++ b/xen/common/mem_event.c
> @@ -20,16 +20,19 @@
> * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
> USA
> */
>
> -#ifdef CONFIG_X86
> -
> +#include <xen/sched.h>
> #include <asm/domain.h>
> #include <xen/event.h>
> #include <xen/wait.h>
> #include <asm/p2m.h>
> #include <xen/mem_event.h>
> #include <xen/mem_access.h>
> +
> +#ifdef CONFIG_X86
> #include <asm/mem_paging.h>
> #include <asm/mem_sharing.h>
> +#endif
> +
> #include <xsm/xsm.h>
>
> /* for public/io/ring.h macros */
> @@ -427,6 +430,7 @@ static void mem_access_notification(struct vcpu *v,
> unsigned int port)
> p2m_mem_access_resume(v->domain);
> }
>
> +#ifdef CONFIG_X86
> /* Registered with Xen-bound event channel for incoming notifications. */
> static void mem_paging_notification(struct vcpu *v, unsigned int port)
> {
> @@ -470,6 +474,7 @@ int do_mem_event_op(int op, uint32_t domain, void *arg)
> rcu_unlock_domain(d);
> return ret;
> }
> +#endif
>
> /* Clean up on domain destruction */
> void mem_event_cleanup(struct domain *d)
> @@ -538,6 +543,8 @@ int mem_event_domctl(struct domain *d,
> xen_domctl_mem_event_op_t *mec,
> {
> case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE:
> {
> +
> +#ifdef CONFIG_X86
> rc = -ENODEV;
> /* Only HAP is supported */
> if ( !hap_enabled(d) )
> @@ -546,6 +553,7 @@ int mem_event_domctl(struct domain *d,
> xen_domctl_mem_event_op_t *mec,
> /* Currently only EPT is supported */
> if ( !cpu_has_vmx )
> break;
> +#endif
>
> rc = mem_event_enable(d, mec, med, _VPF_mem_access,
> HVM_PARAM_ACCESS_RING_PFN,
> @@ -567,6 +575,7 @@ int mem_event_domctl(struct domain *d,
> xen_domctl_mem_event_op_t *mec,
> }
> break;
>
> +#ifdef CONFIG_X86
> case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
> {
> struct mem_event_domain *med = &d->mem_event->paging;
> @@ -656,6 +665,7 @@ int mem_event_domctl(struct domain *d,
> xen_domctl_mem_event_op_t *mec,
> }
> }
> break;
> +#endif
>
> default:
> rc = -ENOSYS;
> @@ -695,7 +705,6 @@ void mem_event_vcpu_unpause(struct vcpu *v)
>
> vcpu_unpause(v);
> }
> -#endif
>
> /*
> * Local variables:
> diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
> index 06c93a0..f3d1f33 100644
> --- a/xen/include/asm-arm/p2m.h
> +++ b/xen/include/asm-arm/p2m.h
> @@ -2,9 +2,55 @@
> #define _XEN_P2M_H
>
> #include <xen/mm.h>
> +#include <public/memory.h>
> +#include <public/mem_event.h>
>
> struct domain;
>
> +/* List of possible type for each page in the p2m entry.
> + * The number of available bit per page in the pte for this purpose is 4 bits.
> + * So it's possible to only have 16 fields. If we run out of value in the
> + * future, it's possible to use higher value for pseudo-type and don't store
> + * them in the p2m entry.
> + */
> +typedef enum {
> + p2m_invalid = 0, /* Nothing mapped here */
> + p2m_ram_rw, /* Normal read/write guest RAM */
> + p2m_ram_ro, /* Read-only; writes are silently dropped */
> + p2m_mmio_direct, /* Read/write mapping of genuine MMIO area */
> + p2m_map_foreign, /* Ram pages from foreign domain */
> + p2m_grant_map_rw, /* Read/write grant mapping */
> + p2m_grant_map_ro, /* Read-only grant mapping */
> + /* The types below are only used to decide the page attribute in the P2M */
> + p2m_iommu_map_rw, /* Read/write iommu mapping */
> + p2m_iommu_map_ro, /* Read-only iommu mapping */
> + p2m_max_real_type, /* Types after this won't be store in the p2m */
> +} p2m_type_t;
> +
> +/*
> + * Additional access types, which are used to further restrict
> + * the permissions given by the p2m_type_t memory type. Violations
> + * caused by p2m_access_t restrictions are sent to the mem_event
> + * interface.
> + *
> + * The access permissions are soft state: when any ambigious change of page
> + * type or use occurs, or when pages are flushed, swapped, or at any other
> + * convenient type, the access permissions can get reset to the p2m_domain
> + * default.
> + */
> +typedef enum {
> + p2m_access_n = 0, /* No access permissions allowed */
> + p2m_access_r = 1,
> + p2m_access_w = 2,
> + p2m_access_rw = 3,
> + p2m_access_x = 4,
> + p2m_access_rx = 5,
> + p2m_access_wx = 6,
> + p2m_access_rwx = 7
> +
> + /* NOTE: Assumed to be only 4 bits right now */
> +} p2m_access_t;
> +
> /* Per-p2m-table state */
> struct p2m_domain {
> /* Lock that protects updates to the p2m */
> @@ -38,27 +84,17 @@ struct p2m_domain {
> * at each p2m tree level. */
> unsigned long shattered[4];
> } stats;
> -};
>
> -/* List of possible type for each page in the p2m entry.
> - * The number of available bit per page in the pte for this purpose is 4
> bits.
> - * So it's possible to only have 16 fields. If we run out of value in the
> - * future, it's possible to use higher value for pseudo-type and don't
> store
> - * them in the p2m entry.
> - */
> -typedef enum {
> - p2m_invalid = 0, /* Nothing mapped here */
> - p2m_ram_rw, /* Normal read/write guest RAM */
> - p2m_ram_ro, /* Read-only; writes are silently dropped */
> - p2m_mmio_direct, /* Read/write mapping of genuine MMIO area */
> - p2m_map_foreign, /* Ram pages from foreign domain */
> - p2m_grant_map_rw, /* Read/write grant mapping */
> - p2m_grant_map_ro, /* Read-only grant mapping */
> - /* The types below are only used to decide the page attribute in the
> P2M */
> - p2m_iommu_map_rw, /* Read/write iommu mapping */
> - p2m_iommu_map_ro, /* Read-only iommu mapping */
> - p2m_max_real_type, /* Types after this won't be store in the p2m */
> -} p2m_type_t;
> + /* Default P2M access type for each page in the the domain: new pages,
> + * swapped in pages, cleared pages, and pages that are ambiquously
> + * retyped get this access type. See definition of p2m_access_t. */
> + p2m_access_t default_access;
> +
> + /* If true, and an access fault comes in and there is no mem_event listener,
> + * pause domain. Otherwise, remove access restrictions. */
> + bool_t access_required;
> +
> +};
>
> #define p2m_is_foreign(_t) ((_t) == p2m_map_foreign)
> #define p2m_is_ram(_t) ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro)
> @@ -195,6 +231,30 @@ static inline int get_page_and_type(struct page_info
> *page,
> return rc;
> }
>
> +/* get host p2m table */
> +#define p2m_get_hostp2m(d) (&((d)->arch.p2m))
> +
> +/* Resumes the running of the VCPU, restarting the last instruction */
> +static inline void p2m_mem_access_resume(struct domain *d) {}
> +
> +/* Set access type for a region of pfns.
> + * If start_pfn == -1ul, sets the default access type */
> +static inline
> +long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
> + uint32_t start, uint32_t mask, xenmem_access_t access)
> +{
> + return -ENOSYS;
> +}
> +
> +/* Get access type for a pfn
> + * If pfn == -1ul, gets the default access type */
> +static inline
> +int p2m_get_mem_access(struct domain *d, unsigned long pfn,
> + xenmem_access_t *access)
> +{
> + return -ENOSYS;
> +}
> +
> #endif /* _XEN_P2M_H */
>
> /*
> diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
> index ded5441..5c7c5fd 100644
> --- a/xen/include/xen/mem_access.h
> +++ b/xen/include/xen/mem_access.h
> @@ -23,29 +23,10 @@
> #ifndef _XEN_ASM_MEM_ACCESS_H
> #define _XEN_ASM_MEM_ACCESS_H
>
> -#ifdef CONFIG_X86
> -
> int mem_access_memop(unsigned long cmd,
> XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
> int mem_access_send_req(struct domain *d, mem_event_request_t *req);
>
> -#else
> -
> -static inline
> -int mem_access_memop(unsigned long cmd,
> - XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
> -{
> - return -ENOSYS;
> -}
> -
> -static inline
> -int mem_access_send_req(struct domain *d, mem_event_request_t *req)
> -{
> - return -ENOSYS;
> -}
> -
> -#endif /* CONFIG_X86 */
> -
> #endif /* _XEN_ASM_MEM_ACCESS_H */
>
> /*
> diff --git a/xen/include/xen/mem_event.h b/xen/include/xen/mem_event.h
> index a28d453..e2a9d4d 100644
> --- a/xen/include/xen/mem_event.h
> +++ b/xen/include/xen/mem_event.h
> @@ -24,8 +24,6 @@
> #ifndef __MEM_EVENT_H__
> #define __MEM_EVENT_H__
>
> -#ifdef CONFIG_X86
> -
> /* Clean up on domain destruction */
> void mem_event_cleanup(struct domain *d);
>
> @@ -67,66 +65,25 @@ void mem_event_put_request(struct domain *d, struct
> mem_event_domain *med,
> int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
> mem_event_response_t *rsp);
>
> -int do_mem_event_op(int op, uint32_t domain, void *arg);
> int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
> XEN_GUEST_HANDLE_PARAM(void) u_domctl);
>
> void mem_event_vcpu_pause(struct vcpu *v);
> void mem_event_vcpu_unpause(struct vcpu *v);
>
> -#else
> -
> -static inline void mem_event_cleanup(struct domain *d) {}
> -
> -static inline bool_t mem_event_check_ring(struct mem_event_domain *med)
> -{
> - return 0;
> -}
> -
> -static inline int mem_event_claim_slot(struct domain *d,
> - struct mem_event_domain *med)
> -{
> - return -ENOSYS;
> -}
> -
> -static inline int mem_event_claim_slot_nosleep(struct domain *d,
> - struct mem_event_domain *med)
> -{
> - return -ENOSYS;
> -}
> -
> -static inline
> -void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)
> -{}
> -
> -static inline
> -void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
> - mem_event_request_t *req)
> -{}
> +#ifdef CONFIG_X86
>
> -static inline
> -int mem_event_get_response(struct domain *d, struct mem_event_domain *med,
> - mem_event_response_t *rsp)
> -{
> - return -ENOSYS;
> -}
> +int do_mem_event_op(int op, uint32_t domain, void *arg);
>
> -static inline int do_mem_event_op(int op, uint32_t domain, void *arg)
> -{
> - return -ENOSYS;
> -}
> +#else
>
> static inline
> -int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl)
> +int do_mem_event_op(int op, uint32_t domain, void *arg)
> {
> return -ENOSYS;
> }
>
> -static inline void mem_event_vcpu_pause(struct vcpu *v) {}
> -static inline void mem_event_vcpu_unpause(struct vcpu *v) {}
> -
> -#endif /* CONFIG_X86 */
> +#endif
>
> #endif /* __MEM_EVENT_H__ */
>
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 4575dda..2365fad 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -1,4 +1,3 @@
> -
> #ifndef __SCHED_H__
> #define __SCHED_H__
>
> diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
> index c5aa316..61677ea 100644
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -507,6 +507,18 @@ static XSM_INLINE int
> xsm_hvm_param_nested(XSM_DEFAULT_ARG struct domain *d)
> return xsm_default_action(action, current->domain, d);
> }
>
> +static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain *d, int mode, int op)
> +{
> + XSM_ASSERT_ACTION(XSM_PRIV);
> + return xsm_default_action(action, current->domain, d);
> +}
> +
> +static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, int op)
> +{
> + XSM_ASSERT_ACTION(XSM_DM_PRIV);
> + return xsm_default_action(action, current->domain, d);
> +}
> +
> #ifdef CONFIG_X86
> static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID)
> {
> @@ -550,18 +562,6 @@ static XSM_INLINE int
> xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int
> return xsm_default_action(action, current->domain, d);
> }
>
> -static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain
> *d, int mode, int op)
> -{
> - XSM_ASSERT_ACTION(XSM_PRIV);
> - return xsm_default_action(action, current->domain, d);
> -}
> -
> -static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d,
> int op)
> -{
> - XSM_ASSERT_ACTION(XSM_DM_PRIV);
> - return xsm_default_action(action, current->domain, d);
> -}
> -
> static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain
> *d, struct domain *cd, int op)
> {
> XSM_ASSERT_ACTION(XSM_DM_PRIV);
> diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
> index a85045d..64289cd 100644
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -140,6 +140,9 @@ struct xsm_operations {
> int (*hvm_control) (struct domain *d, unsigned long op);
> int (*hvm_param_nested) (struct domain *d);
>
> + int (*mem_event_control) (struct domain *d, int mode, int op);
> + int (*mem_event_op) (struct domain *d, int op);
> +
> #ifdef CONFIG_X86
> int (*do_mca) (void);
> int (*shadow_control) (struct domain *d, uint32_t op);
> @@ -148,8 +151,6 @@ struct xsm_operations {
> int (*hvm_set_pci_link_route) (struct domain *d);
> int (*hvm_inject_msi) (struct domain *d);
> int (*hvm_ioreq_server) (struct domain *d, int op);
> - int (*mem_event_control) (struct domain *d, int mode, int op);
> - int (*mem_event_op) (struct domain *d, int op);
> int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
> int (*apic) (struct domain *d, int cmd);
> int (*memtype) (uint32_t access);
> @@ -534,6 +535,16 @@ static inline int xsm_hvm_param_nested (xsm_default_t
> def, struct domain *d)
> return xsm_ops->hvm_param_nested(d);
> }
>
> +static inline int xsm_mem_event_control (xsm_default_t def, struct domain *d, int mode, int op)
> +{
> + return xsm_ops->mem_event_control(d, mode, op);
> +}
> +
> +static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, int op)
> +{
> + return xsm_ops->mem_event_op(d, op);
> +}
> +
> #ifdef CONFIG_X86
> static inline int xsm_do_mca(xsm_default_t def)
> {
> @@ -570,16 +581,6 @@ static inline int xsm_hvm_ioreq_server (xsm_default_t
> def, struct domain *d, int
> return xsm_ops->hvm_ioreq_server(d, op);
> }
>
> -static inline int xsm_mem_event_control (xsm_default_t def, struct domain
> *d, int mode, int op)
> -{
> - return xsm_ops->mem_event_control(d, mode, op);
> -}
> -
> -static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d,
> int op)
> -{
> - return xsm_ops->mem_event_op(d, op);
> -}
> -
> static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain
> *d, struct domain *cd, int op)
> {
> return xsm_ops->mem_sharing_op(d, cd, op);
> diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
> index c95c803..9df9d81 100644
> --- a/xen/xsm/dummy.c
> +++ b/xen/xsm/dummy.c
> @@ -116,6 +116,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
> set_to_dummy_if_null(ops, add_to_physmap);
> set_to_dummy_if_null(ops, remove_from_physmap);
> set_to_dummy_if_null(ops, map_gmfn_foreign);
> + set_to_dummy_if_null(ops, mem_event_control);
> + set_to_dummy_if_null(ops, mem_event_op);
>
> #ifdef CONFIG_X86
> set_to_dummy_if_null(ops, do_mca);
> @@ -125,8 +127,6 @@ void xsm_fixup_ops (struct xsm_operations *ops)
> set_to_dummy_if_null(ops, hvm_set_pci_link_route);
> set_to_dummy_if_null(ops, hvm_inject_msi);
> set_to_dummy_if_null(ops, hvm_ioreq_server);
> - set_to_dummy_if_null(ops, mem_event_control);
> - set_to_dummy_if_null(ops, mem_event_op);
> set_to_dummy_if_null(ops, mem_sharing_op);
> set_to_dummy_if_null(ops, apic);
> set_to_dummy_if_null(ops, platform_op);
> --
> 2.0.1
>
>
Thread overview: 26+ messages
2014-08-22 9:30 [PATCH RFC 0/7] Mem_event and mem_access for ARM Tamas K Lengyel
2014-08-22 9:30 ` [PATCH RFC 1/7] xen: Relocate mem_access and mem_event into common Tamas K Lengyel
2014-08-25 17:19 ` Andres Lagar Cavilla
2014-08-26 10:52 ` Tamas K Lengyel
2014-08-26 12:42 ` Jan Beulich
2014-08-26 13:25 ` Tamas K Lengyel
2014-08-26 13:34 ` Jan Beulich
2014-08-26 14:42 ` Tamas K Lengyel
2014-08-26 15:32 ` Jan Beulich
2014-08-26 16:30 ` Tamas K Lengyel
2014-08-27 6:29 ` Jan Beulich
2014-08-22 9:30 ` [PATCH RFC 2/7] xen/mem_event: Clean out superflous white-spaces Tamas K Lengyel
2014-08-25 17:20 ` Andres Lagar Cavilla
2014-08-26 13:35 ` Jan Beulich
2014-08-26 13:59 ` Tamas K Lengyel
2014-08-22 9:30 ` [PATCH RFC 3/7] xen/arm: Enable the compilation of mem_access and mem_event on ARM Tamas K Lengyel
2014-08-25 17:25 ` Andres Lagar Cavilla [this message]
2014-08-26 8:32 ` Tamas K Lengyel
2014-08-26 13:51 ` Jan Beulich
[not found] ` <CAErYnshbvgxzBVSPu0mM3UUc0kr_zfENiHw9KmT=30-kpy_DZA@mail.gmail.com>
2014-08-26 14:38 ` Jan Beulich
2014-08-26 15:21 ` Tamas K Lengyel
2014-08-26 15:33 ` Jan Beulich
2014-08-22 9:30 ` [PATCH RFC 4/7] tools/libxc: Allocate magic page for mem access " Tamas K Lengyel
2014-08-22 9:30 ` [PATCH RFC 5/7] xen/arm: Data abort exception (R/W) mem_events Tamas K Lengyel
2014-08-22 9:30 ` [PATCH RFC 6/7] xen/arm: Instruction prefetch abort (X) mem_event handling Tamas K Lengyel
2014-08-22 9:30 ` [PATCH RFC 7/7] tools/tests: Enable xen-access on ARM Tamas K Lengyel