From mboxrd@z Thu Jan 1 00:00:00 1970 From: Andres Lagar Cavilla Subject: Re: [PATCH RFC 3/7] xen/arm: Enable the compilation of mem_access and mem_event on ARM. Date: Mon, 25 Aug 2014 10:25:40 -0700 Message-ID: References: <1408699832-13325-1-git-send-email-tamas.lengyel@zentific.com> <1408699832-13325-4-git-send-email-tamas.lengyel@zentific.com> Mime-Version: 1.0 Content-Type: multipart/mixed; boundary="===============6240843452304135892==" Return-path: In-Reply-To: <1408699832-13325-4-git-send-email-tamas.lengyel@zentific.com> List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Sender: xen-devel-bounces@lists.xen.org Errors-To: xen-devel-bounces@lists.xen.org To: Tamas K Lengyel Cc: keir@xen.org, Ian Campbell , Tim Deegan , Ian Jackson , Xen-devel , stefano.stabellini@citrix.com, Jan Beulich , dgdegra@tycho.nsa.gov List-Id: xen-devel@lists.xenproject.org --===============6240843452304135892== Content-Type: multipart/alternative; boundary=089e010d8406ddcd3605017779e2 --089e010d8406ddcd3605017779e2 Content-Type: text/plain; charset=UTF-8 On Fri, Aug 22, 2014 at 2:30 AM, Tamas K Lengyel wrote: > This patch sets up the infrastructure to support mem_access and mem_event > on ARM and turns on compilation. We define the required XSM functions, > handling of domctl copyback, and the required p2m types and stub-functions > in this patch. > > Signed-off-by: Tamas K Lengyel > Non-ARM bits look good to me. I see here the disablement of CONFIG_X86. If Xen were to ever support another architecture (hello IA64), it might be more reasonable to keep an #ifdef CONFIG_X86 && CONFIG_ARM. I don't know how unlikely that future direction might be. 
Andres > --- > xen/arch/arm/domctl.c | 36 ++++++++++++++-- > xen/arch/arm/mm.c | 18 ++++++-- > xen/arch/arm/p2m.c | 5 +++ > xen/common/mem_access.c | 6 +-- > xen/common/mem_event.c | 15 +++++-- > xen/include/asm-arm/p2m.h | 100 > ++++++++++++++++++++++++++++++++++--------- > xen/include/xen/mem_access.h | 19 -------- > xen/include/xen/mem_event.h | 53 +++-------------------- > xen/include/xen/sched.h | 1 - > xen/include/xsm/dummy.h | 24 +++++------ > xen/include/xsm/xsm.h | 25 +++++------ > xen/xsm/dummy.c | 4 +- > 12 files changed, 178 insertions(+), 128 deletions(-) > > diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c > index 45974e7..bb0b8d3 100644 > --- a/xen/arch/arm/domctl.c > +++ b/xen/arch/arm/domctl.c > @@ -11,10 +11,17 @@ > #include > #include > #include > +#include > +#include > +#include > > long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, > XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) > { > + > + long ret; > + bool_t copyback = 0; > + > switch ( domctl->cmd ) > { > case XEN_DOMCTL_cacheflush: > @@ -23,17 +30,38 @@ long arch_do_domctl(struct xen_domctl *domctl, struct > domain *d, > unsigned long e = s + domctl->u.cacheflush.nr_pfns; > > if ( domctl->u.cacheflush.nr_pfns > (1U< - return -EINVAL; > + { > + ret = -EINVAL; > + break; > + } > > if ( e < s ) > - return -EINVAL; > + { > + ret = -EINVAL; > + break; > + } > > - return p2m_cache_flush(d, s, e); > + ret = p2m_cache_flush(d, s, e); > } > + break; > + > + case XEN_DOMCTL_mem_event_op: > + { > + ret = mem_event_domctl(d, &domctl->u.mem_event_op, > + guest_handle_cast(u_domctl, void)); > + copyback = 1; > + } > + break; > > default: > - return subarch_do_domctl(domctl, d, u_domctl); > + ret = subarch_do_domctl(domctl, d, u_domctl); > + break; > } > + > + if ( copyback && __copy_to_guest(u_domctl, domctl, 1) ) > + ret = -EFAULT; > + > + return ret; > } > > void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c) > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c > 
index 0a243b0..cd04dec 100644 > --- a/xen/arch/arm/mm.c > +++ b/xen/arch/arm/mm.c > @@ -35,6 +35,9 @@ > #include > #include > #include > +#include > +#include > +#include > #include > #include > #include > @@ -1111,18 +1114,27 @@ int xenmem_add_to_physmap_one( > > long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg) > { > - switch ( op ) > + > + long rc; > + > + switch ( op & MEMOP_CMD_MASK ) > { > /* XXX: memsharing not working yet */ > case XENMEM_get_sharing_shared_pages: > case XENMEM_get_sharing_freed_pages: > return 0; > + case XENMEM_access_op: > + { > + rc = mem_access_memop(op, guest_handle_cast(arg, > xen_mem_access_op_t)); > + break; > + } > > default: > - return -ENOSYS; > + rc = -ENOSYS; > + break; > } > > - return 0; > + return rc; > } > > struct domain *page_get_owner_and_reference(struct page_info *page) > diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c > index 143199b..0ca0d2f 100644 > --- a/xen/arch/arm/p2m.c > +++ b/xen/arch/arm/p2m.c > @@ -10,6 +10,9 @@ > #include > #include > #include > +#include > +#include > +#include > > /* First level P2M is 2 consecutive pages */ > #define P2M_FIRST_ORDER 1 > @@ -999,6 +1002,8 @@ int p2m_init(struct domain *d) > p2m->max_mapped_gfn = 0; > p2m->lowest_mapped_gfn = ULONG_MAX; > > + p2m->default_access = p2m_access_rwx; > + > err: > spin_unlock(&p2m->lock); > > diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c > index 84acdf9..6bb9cf4 100644 > --- a/xen/common/mem_access.c > +++ b/xen/common/mem_access.c > @@ -29,8 +29,6 @@ > #include > #include > > -#ifdef CONFIG_X86 > - > int mem_access_memop(unsigned long cmd, > XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg) > { > @@ -45,9 +43,11 @@ int mem_access_memop(unsigned long cmd, > if ( rc ) > return rc; > > +#ifdef CONFIG_X86 > rc = -EINVAL; > if ( !is_hvm_domain(d) ) > goto out; > +#endif > > rc = xsm_mem_event_op(XSM_DM_PRIV, d, XENMEM_access_op); > if ( rc ) > @@ -125,8 +125,6 @@ int mem_access_send_req(struct domain *d, > 
mem_event_request_t *req) > return 0; > } > > -#endif /* CONFIG_X86 */ > - > /* > * Local variables: > * mode: C > diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c > index a94ddf6..2a91928 100644 > --- a/xen/common/mem_event.c > +++ b/xen/common/mem_event.c > @@ -20,16 +20,19 @@ > * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 > USA > */ > > -#ifdef CONFIG_X86 > - > +#include > #include > #include > #include > #include > #include > #include > + > +#ifdef CONFIG_X86 > #include > #include > +#endif > + > #include > > /* for public/io/ring.h macros */ > @@ -427,6 +430,7 @@ static void mem_access_notification(struct vcpu *v, > unsigned int port) > p2m_mem_access_resume(v->domain); > } > > +#ifdef CONFIG_X86 > /* Registered with Xen-bound event channel for incoming notifications. */ > static void mem_paging_notification(struct vcpu *v, unsigned int port) > { > @@ -470,6 +474,7 @@ int do_mem_event_op(int op, uint32_t domain, void *arg) > rcu_unlock_domain(d); > return ret; > } > +#endif > > /* Clean up on domain destruction */ > void mem_event_cleanup(struct domain *d) > @@ -538,6 +543,8 @@ int mem_event_domctl(struct domain *d, > xen_domctl_mem_event_op_t *mec, > { > case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE: > { > + > +#ifdef CONFIG_X86 > rc = -ENODEV; > /* Only HAP is supported */ > if ( !hap_enabled(d) ) > @@ -546,6 +553,7 @@ int mem_event_domctl(struct domain *d, > xen_domctl_mem_event_op_t *mec, > /* Currently only EPT is supported */ > if ( !cpu_has_vmx ) > break; > +#endif > > rc = mem_event_enable(d, mec, med, _VPF_mem_access, > HVM_PARAM_ACCESS_RING_PFN, > @@ -567,6 +575,7 @@ int mem_event_domctl(struct domain *d, > xen_domctl_mem_event_op_t *mec, > } > break; > > +#ifdef CONFIG_X86 > case XEN_DOMCTL_MEM_EVENT_OP_PAGING: > { > struct mem_event_domain *med = &d->mem_event->paging; > @@ -656,6 +665,7 @@ int mem_event_domctl(struct domain *d, > xen_domctl_mem_event_op_t *mec, > } > } > break; > +#endif > > default: > rc = 
-ENOSYS; > @@ -695,7 +705,6 @@ void mem_event_vcpu_unpause(struct vcpu *v) > > vcpu_unpause(v); > } > -#endif > > /* > * Local variables: > diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h > index 06c93a0..f3d1f33 100644 > --- a/xen/include/asm-arm/p2m.h > +++ b/xen/include/asm-arm/p2m.h > @@ -2,9 +2,55 @@ > #define _XEN_P2M_H > > #include > +#include > +#include > > struct domain; > > +/* List of possible type for each page in the p2m entry. > + * The number of available bit per page in the pte for this purpose is 4 > bits. > + * So it's possible to only have 16 fields. If we run out of value in the > + * future, it's possible to use higher value for pseudo-type and don't > store > + * them in the p2m entry. > + */ > +typedef enum { > + p2m_invalid = 0, /* Nothing mapped here */ > + p2m_ram_rw, /* Normal read/write guest RAM */ > + p2m_ram_ro, /* Read-only; writes are silently dropped */ > + p2m_mmio_direct, /* Read/write mapping of genuine MMIO area */ > + p2m_map_foreign, /* Ram pages from foreign domain */ > + p2m_grant_map_rw, /* Read/write grant mapping */ > + p2m_grant_map_ro, /* Read-only grant mapping */ > + /* The types below are only used to decide the page attribute in the > P2M */ > + p2m_iommu_map_rw, /* Read/write iommu mapping */ > + p2m_iommu_map_ro, /* Read-only iommu mapping */ > + p2m_max_real_type, /* Types after this won't be store in the p2m */ > +} p2m_type_t; > + > +/* > + * Additional access types, which are used to further restrict > + * the permissions given by the p2m_type_t memory type. Violations > + * caused by p2m_access_t restrictions are sent to the mem_event > + * interface. > + * > + * The access permissions are soft state: when any ambigious change of > page > + * type or use occurs, or when pages are flushed, swapped, or at any other > + * convenient type, the access permissions can get reset to the p2m_domain > + * default. 
> + */ > +typedef enum { > + p2m_access_n = 0, /* No access permissions allowed */ > + p2m_access_r = 1, > + p2m_access_w = 2, > + p2m_access_rw = 3, > + p2m_access_x = 4, > + p2m_access_rx = 5, > + p2m_access_wx = 6, > + p2m_access_rwx = 7 > + > + /* NOTE: Assumed to be only 4 bits right now */ > +} p2m_access_t; > + > /* Per-p2m-table state */ > struct p2m_domain { > /* Lock that protects updates to the p2m */ > @@ -38,27 +84,17 @@ struct p2m_domain { > * at each p2m tree level. */ > unsigned long shattered[4]; > } stats; > -}; > > -/* List of possible type for each page in the p2m entry. > - * The number of available bit per page in the pte for this purpose is 4 > bits. > - * So it's possible to only have 16 fields. If we run out of value in the > - * future, it's possible to use higher value for pseudo-type and don't > store > - * them in the p2m entry. > - */ > -typedef enum { > - p2m_invalid = 0, /* Nothing mapped here */ > - p2m_ram_rw, /* Normal read/write guest RAM */ > - p2m_ram_ro, /* Read-only; writes are silently dropped */ > - p2m_mmio_direct, /* Read/write mapping of genuine MMIO area */ > - p2m_map_foreign, /* Ram pages from foreign domain */ > - p2m_grant_map_rw, /* Read/write grant mapping */ > - p2m_grant_map_ro, /* Read-only grant mapping */ > - /* The types below are only used to decide the page attribute in the > P2M */ > - p2m_iommu_map_rw, /* Read/write iommu mapping */ > - p2m_iommu_map_ro, /* Read-only iommu mapping */ > - p2m_max_real_type, /* Types after this won't be store in the p2m */ > -} p2m_type_t; > + /* Default P2M access type for each page in the the domain: new pages, > + * swapped in pages, cleared pages, and pages that are ambiquously > + * retyped get this access type. See definition of p2m_access_t. */ > + p2m_access_t default_access; > + > + /* If true, and an access fault comes in and there is no mem_event > listener, > + * pause domain. Otherwise, remove access restrictions. 
*/ > + bool_t access_required; > + > +}; > > #define p2m_is_foreign(_t) ((_t) == p2m_map_foreign) > #define p2m_is_ram(_t) ((_t) == p2m_ram_rw || (_t) == p2m_ram_ro) > @@ -195,6 +231,30 @@ static inline int get_page_and_type(struct page_info > *page, > return rc; > } > > +/* get host p2m table */ > +#define p2m_get_hostp2m(d) (&((d)->arch.p2m)) > + > +/* Resumes the running of the VCPU, restarting the last instruction */ > +static inline void p2m_mem_access_resume(struct domain *d) {} > + > +/* Set access type for a region of pfns. > + * If start_pfn == -1ul, sets the default access type */ > +static inline > +long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, > uint32_t nr, > + uint32_t start, uint32_t mask, xenmem_access_t > access) > +{ > + return -ENOSYS; > +} > + > +/* Get access type for a pfn > + * If pfn == -1ul, gets the default access type */ > +static inline > +int p2m_get_mem_access(struct domain *d, unsigned long pfn, > + xenmem_access_t *access) > +{ > + return -ENOSYS; > +} > + > #endif /* _XEN_P2M_H */ > > /* > diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h > index ded5441..5c7c5fd 100644 > --- a/xen/include/xen/mem_access.h > +++ b/xen/include/xen/mem_access.h > @@ -23,29 +23,10 @@ > #ifndef _XEN_ASM_MEM_ACCESS_H > #define _XEN_ASM_MEM_ACCESS_H > > -#ifdef CONFIG_X86 > - > int mem_access_memop(unsigned long cmd, > XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg); > int mem_access_send_req(struct domain *d, mem_event_request_t *req); > > -#else > - > -static inline > -int mem_access_memop(unsigned long cmd, > - XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg) > -{ > - return -ENOSYS; > -} > - > -static inline > -int mem_access_send_req(struct domain *d, mem_event_request_t *req) > -{ > - return -ENOSYS; > -} > - > -#endif /* CONFIG_X86 */ > - > #endif /* _XEN_ASM_MEM_ACCESS_H */ > > /* > diff --git a/xen/include/xen/mem_event.h b/xen/include/xen/mem_event.h > index a28d453..e2a9d4d 100644 > --- 
a/xen/include/xen/mem_event.h > +++ b/xen/include/xen/mem_event.h > @@ -24,8 +24,6 @@ > #ifndef __MEM_EVENT_H__ > #define __MEM_EVENT_H__ > > -#ifdef CONFIG_X86 > - > /* Clean up on domain destruction */ > void mem_event_cleanup(struct domain *d); > > @@ -67,66 +65,25 @@ void mem_event_put_request(struct domain *d, struct > mem_event_domain *med, > int mem_event_get_response(struct domain *d, struct mem_event_domain *med, > mem_event_response_t *rsp); > > -int do_mem_event_op(int op, uint32_t domain, void *arg); > int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec, > XEN_GUEST_HANDLE_PARAM(void) u_domctl); > > void mem_event_vcpu_pause(struct vcpu *v); > void mem_event_vcpu_unpause(struct vcpu *v); > > -#else > - > -static inline void mem_event_cleanup(struct domain *d) {} > - > -static inline bool_t mem_event_check_ring(struct mem_event_domain *med) > -{ > - return 0; > -} > - > -static inline int mem_event_claim_slot(struct domain *d, > - struct mem_event_domain *med) > -{ > - return -ENOSYS; > -} > - > -static inline int mem_event_claim_slot_nosleep(struct domain *d, > - struct mem_event_domain *med) > -{ > - return -ENOSYS; > -} > - > -static inline > -void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med) > -{} > - > -static inline > -void mem_event_put_request(struct domain *d, struct mem_event_domain *med, > - mem_event_request_t *req) > -{} > +#ifdef CONFIG_X86 > > -static inline > -int mem_event_get_response(struct domain *d, struct mem_event_domain *med, > - mem_event_response_t *rsp) > -{ > - return -ENOSYS; > -} > +int do_mem_event_op(int op, uint32_t domain, void *arg); > > -static inline int do_mem_event_op(int op, uint32_t domain, void *arg) > -{ > - return -ENOSYS; > -} > +#else > > static inline > -int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec, > - XEN_GUEST_HANDLE_PARAM(void) u_domctl) > +int do_mem_event_op(int op, uint32_t domain, void *arg) > { > return -ENOSYS; > } > > -static 
inline void mem_event_vcpu_pause(struct vcpu *v) {} > -static inline void mem_event_vcpu_unpause(struct vcpu *v) {} > - > -#endif /* CONFIG_X86 */ > +#endif > > #endif /* __MEM_EVENT_H__ */ > > diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h > index 4575dda..2365fad 100644 > --- a/xen/include/xen/sched.h > +++ b/xen/include/xen/sched.h > @@ -1,4 +1,3 @@ > - > #ifndef __SCHED_H__ > #define __SCHED_H__ > > diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h > index c5aa316..61677ea 100644 > --- a/xen/include/xsm/dummy.h > +++ b/xen/include/xsm/dummy.h > @@ -507,6 +507,18 @@ static XSM_INLINE int > xsm_hvm_param_nested(XSM_DEFAULT_ARG struct domain *d) > return xsm_default_action(action, current->domain, d); > } > > +static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain > *d, int mode, int op) > +{ > + XSM_ASSERT_ACTION(XSM_PRIV); > + return xsm_default_action(action, current->domain, d); > +} > + > +static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, > int op) > +{ > + XSM_ASSERT_ACTION(XSM_DM_PRIV); > + return xsm_default_action(action, current->domain, d); > +} > + > #ifdef CONFIG_X86 > static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID) > { > @@ -550,18 +562,6 @@ static XSM_INLINE int > xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int > return xsm_default_action(action, current->domain, d); > } > > -static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain > *d, int mode, int op) > -{ > - XSM_ASSERT_ACTION(XSM_PRIV); > - return xsm_default_action(action, current->domain, d); > -} > - > -static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, > int op) > -{ > - XSM_ASSERT_ACTION(XSM_DM_PRIV); > - return xsm_default_action(action, current->domain, d); > -} > - > static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain > *d, struct domain *cd, int op) > { > XSM_ASSERT_ACTION(XSM_DM_PRIV); > diff --git a/xen/include/xsm/xsm.h 
b/xen/include/xsm/xsm.h > index a85045d..64289cd 100644 > --- a/xen/include/xsm/xsm.h > +++ b/xen/include/xsm/xsm.h > @@ -140,6 +140,9 @@ struct xsm_operations { > int (*hvm_control) (struct domain *d, unsigned long op); > int (*hvm_param_nested) (struct domain *d); > > + int (*mem_event_control) (struct domain *d, int mode, int op); > + int (*mem_event_op) (struct domain *d, int op); > + > #ifdef CONFIG_X86 > int (*do_mca) (void); > int (*shadow_control) (struct domain *d, uint32_t op); > @@ -148,8 +151,6 @@ struct xsm_operations { > int (*hvm_set_pci_link_route) (struct domain *d); > int (*hvm_inject_msi) (struct domain *d); > int (*hvm_ioreq_server) (struct domain *d, int op); > - int (*mem_event_control) (struct domain *d, int mode, int op); > - int (*mem_event_op) (struct domain *d, int op); > int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op); > int (*apic) (struct domain *d, int cmd); > int (*memtype) (uint32_t access); > @@ -534,6 +535,16 @@ static inline int xsm_hvm_param_nested (xsm_default_t > def, struct domain *d) > return xsm_ops->hvm_param_nested(d); > } > > +static inline int xsm_mem_event_control (xsm_default_t def, struct domain > *d, int mode, int op) > +{ > + return xsm_ops->mem_event_control(d, mode, op); > +} > + > +static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, > int op) > +{ > + return xsm_ops->mem_event_op(d, op); > +} > + > #ifdef CONFIG_X86 > static inline int xsm_do_mca(xsm_default_t def) > { > @@ -570,16 +581,6 @@ static inline int xsm_hvm_ioreq_server (xsm_default_t > def, struct domain *d, int > return xsm_ops->hvm_ioreq_server(d, op); > } > > -static inline int xsm_mem_event_control (xsm_default_t def, struct domain > *d, int mode, int op) > -{ > - return xsm_ops->mem_event_control(d, mode, op); > -} > - > -static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, > int op) > -{ > - return xsm_ops->mem_event_op(d, op); > -} > - > static inline int xsm_mem_sharing_op 
(xsm_default_t def, struct domain > *d, struct domain *cd, int op) > { > return xsm_ops->mem_sharing_op(d, cd, op); > diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c > index c95c803..9df9d81 100644 > --- a/xen/xsm/dummy.c > +++ b/xen/xsm/dummy.c > @@ -116,6 +116,8 @@ void xsm_fixup_ops (struct xsm_operations *ops) > set_to_dummy_if_null(ops, add_to_physmap); > set_to_dummy_if_null(ops, remove_from_physmap); > set_to_dummy_if_null(ops, map_gmfn_foreign); > + set_to_dummy_if_null(ops, mem_event_control); > + set_to_dummy_if_null(ops, mem_event_op); > > #ifdef CONFIG_X86 > set_to_dummy_if_null(ops, do_mca); > @@ -125,8 +127,6 @@ void xsm_fixup_ops (struct xsm_operations *ops) > set_to_dummy_if_null(ops, hvm_set_pci_link_route); > set_to_dummy_if_null(ops, hvm_inject_msi); > set_to_dummy_if_null(ops, hvm_ioreq_server); > - set_to_dummy_if_null(ops, mem_event_control); > - set_to_dummy_if_null(ops, mem_event_op); > set_to_dummy_if_null(ops, mem_sharing_op); > set_to_dummy_if_null(ops, apic); > set_to_dummy_if_null(ops, platform_op); > -- > 2.0.1 > > --089e010d8406ddcd3605017779e2 Content-Type: text/html; charset=UTF-8 Content-Transfer-Encoding: quoted-printable
On F= ri, Aug 22, 2014 at 2:30 AM, Tamas K Lengyel <tamas.lengyel@zent= ific.com> wrote:
This patch sets up the infrastructure to sup= port mem_access and mem_event
=C2=A0on ARM and turns on compilation. We define the required XSM functions= ,
handling of domctl copyback, and the required p2m types and stub-functions<= br> in this patch.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
Non-ARM bits = LGTM to me. I see here the disablement of CONFIG_X86.

If Xen were to ever support another architecture (hello IA64), it migh= t be more reasonable to keep an #ifdef CONFIG_X86 && CONFIG_ARM. I = don't know how unlikely that future direction might be.

Andres=C2=A0
---
=C2=A0xen/arch/arm/domctl.c=C2=A0 =C2=A0 =C2=A0 =C2=A0 |=C2=A0 36 +++++++++= +++++--
=C2=A0xen/arch/arm/mm.c=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 |=C2=A0 18= ++++++--
=C2=A0xen/arch/arm/p2m.c=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0|=C2=A0 = =C2=A05 +++
=C2=A0xen/common/mem_access.c=C2=A0 =C2=A0 =C2=A0 |=C2=A0 =C2=A06 +--
=C2=A0xen/common/mem_event.c=C2=A0 =C2=A0 =C2=A0 =C2=A0|=C2=A0 15 +++++-- =C2=A0xen/include/asm-arm/p2m.h=C2=A0 =C2=A0 | 100 ++++++++++++++++++++++++= ++++++++++---------
=C2=A0xen/include/xen/mem_access.h |=C2=A0 19 --------
=C2=A0xen/include/xen/mem_event.h=C2=A0 |=C2=A0 53 +++--------------------<= br> =C2=A0xen/include/xen/sched.h=C2=A0 =C2=A0 =C2=A0 |=C2=A0 =C2=A01 -
=C2=A0xen/include/xsm/dummy.h=C2=A0 =C2=A0 =C2=A0 |=C2=A0 24 +++++------ =C2=A0xen/include/xsm/xsm.h=C2=A0 =C2=A0 =C2=A0 =C2=A0 |=C2=A0 25 +++++----= --
=C2=A0xen/xsm/dummy.c=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 |=C2= =A0 =C2=A04 +-
=C2=A012 files changed, 178 insertions(+), 128 deletions(-)

diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
index 45974e7..bb0b8d3 100644
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -11,10 +11,17 @@
=C2=A0#include <xen/sched.h>
=C2=A0#include <xen/hypercall.h>
=C2=A0#include <public/domctl.h>
+#include <asm/guest_access.h>
+#include <xen/mem_event.h>
+#include <public/mem_event.h>

=C2=A0long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
=C2=A0{
+
+=C2=A0 =C2=A0 long ret;
+=C2=A0 =C2=A0 bool_t copyback =3D 0;
+
=C2=A0 =C2=A0 =C2=A0switch ( domctl->cmd )
=C2=A0 =C2=A0 =C2=A0{
=C2=A0 =C2=A0 =C2=A0case XEN_DOMCTL_cacheflush:
@@ -23,17 +30,38 @@ long arch_do_domctl(struct xen_domctl *domctl, struct d= omain *d,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0unsigned long e =3D s + domctl->u.cach= eflush.nr_pfns;

=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0if ( domctl->u.cacheflush.nr_pfns >= (1U<<MAX_ORDER) )
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 return -EINVAL;
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 {
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 ret =3D -EINVAL;
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 break;
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 }

=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0if ( e < s )
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 return -EINVAL;
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 {
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 ret =3D -EINVAL;
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 break;
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 }

-=C2=A0 =C2=A0 =C2=A0 =C2=A0 return p2m_cache_flush(d, s, e);
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 ret =3D p2m_cache_flush(d, s, e);
=C2=A0 =C2=A0 =C2=A0}
+=C2=A0 =C2=A0 break;
+
+=C2=A0 =C2=A0 case XEN_DOMCTL_mem_event_op:
+=C2=A0 =C2=A0 {
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 ret =3D mem_event_domctl(d, &domctl->u.= mem_event_op,
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 guest_handle_cast(u_domctl, void));
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 copyback =3D 1;
+=C2=A0 =C2=A0 }
+=C2=A0 =C2=A0 break;

=C2=A0 =C2=A0 =C2=A0default:
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 return subarch_do_domctl(domctl, d, u_domctl);=
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 ret =3D subarch_do_domctl(domctl, d, u_domctl)= ;
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 break;
=C2=A0 =C2=A0 =C2=A0}
+
+=C2=A0 =C2=A0 if ( copyback && __copy_to_guest(u_domctl, domctl, 1= ) )
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 ret =3D -EFAULT;
+
+=C2=A0 =C2=A0 return ret;
=C2=A0}

=C2=A0void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 0a243b0..cd04dec 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -35,6 +35,9 @@
=C2=A0#include <asm/current.h>
=C2=A0#include <asm/flushtlb.h>
=C2=A0#include <public/memory.h>
+#include <xen/mem_event.h>
+#include <xen/mem_access.h>
+#include <xen/hypercall.h>
=C2=A0#include <xen/sched.h>
=C2=A0#include <xen/vmap.h>
=C2=A0#include <xsm/xsm.h>
@@ -1111,18 +1114,27 @@ int xenmem_add_to_physmap_one(

=C2=A0long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
=C2=A0{
-=C2=A0 =C2=A0 switch ( op )
+
+=C2=A0 =C2=A0 long rc;
+
+=C2=A0 =C2=A0 switch ( op & MEMOP_CMD_MASK )
=C2=A0 =C2=A0 =C2=A0{
=C2=A0 =C2=A0 =C2=A0/* XXX: memsharing not working yet */
=C2=A0 =C2=A0 =C2=A0case XENMEM_get_sharing_shared_pages:
=C2=A0 =C2=A0 =C2=A0case XENMEM_get_sharing_freed_pages:
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0return 0;
+=C2=A0 =C2=A0 case XENMEM_access_op:
+=C2=A0 =C2=A0 {
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 rc =3D mem_access_memop(op, guest_handle_cast(= arg, xen_mem_access_op_t));
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 break;
+=C2=A0 =C2=A0 }

=C2=A0 =C2=A0 =C2=A0default:
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 return -ENOSYS;
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 rc =3D -ENOSYS;
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 break;
=C2=A0 =C2=A0 =C2=A0}

-=C2=A0 =C2=A0 return 0;
+=C2=A0 =C2=A0 return rc;
=C2=A0}

=C2=A0struct domain *page_get_owner_and_reference(struct page_info *page) diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 143199b..0ca0d2f 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -10,6 +10,9 @@
=C2=A0#include <asm/event.h>
=C2=A0#include <asm/hardirq.h>
=C2=A0#include <asm/page.h>
+#include <xen/mem_event.h>
+#include <public/mem_event.h>
+#include <xen/mem_access.h>

=C2=A0/* First level P2M is 2 consecutive pages */
=C2=A0#define P2M_FIRST_ORDER 1
@@ -999,6 +1002,8 @@ int p2m_init(struct domain *d)
=C2=A0 =C2=A0 =C2=A0p2m->max_mapped_gfn =3D 0;
=C2=A0 =C2=A0 =C2=A0p2m->lowest_mapped_gfn =3D ULONG_MAX;

+=C2=A0 =C2=A0 p2m->default_access =3D p2m_access_rwx;
+
=C2=A0err:
=C2=A0 =C2=A0 =C2=A0spin_unlock(&p2m->lock);

diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 84acdf9..6bb9cf4 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -29,8 +29,6 @@
=C2=A0#include <xen/mem_event.h>
=C2=A0#include <xsm/xsm.h>

-#ifdef CONFIG_X86
-
=C2=A0int mem_access_memop(unsigned long cmd,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
=C2=A0{
@@ -45,9 +43,11 @@ int mem_access_memop(unsigned long cmd,
=C2=A0 =C2=A0 =C2=A0if ( rc )
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0return rc;

+#ifdef CONFIG_X86
=C2=A0 =C2=A0 =C2=A0rc =3D -EINVAL;
=C2=A0 =C2=A0 =C2=A0if ( !is_hvm_domain(d) )
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0goto out;
+#endif

=C2=A0 =C2=A0 =C2=A0rc =3D xsm_mem_event_op(XSM_DM_PRIV, d, XENMEM_access_o= p);
=C2=A0 =C2=A0 =C2=A0if ( rc )
@@ -125,8 +125,6 @@ int mem_access_send_req(struct domain *d, mem_event_req= uest_t *req)
=C2=A0 =C2=A0 =C2=A0return 0;
=C2=A0}

-#endif /* CONFIG_X86 */
-
=C2=A0/*
=C2=A0 * Local variables:
=C2=A0 * mode: C
diff --git a/xen/common/mem_event.c b/xen/common/mem_event.c
index a94ddf6..2a91928 100644
--- a/xen/common/mem_event.c
+++ b/xen/common/mem_event.c
@@ -20,16 +20,19 @@
=C2=A0 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA=C2=A0 021= 11-1307=C2=A0 USA
=C2=A0 */

-#ifdef CONFIG_X86
-
+#include <xen/sched.h>
=C2=A0#include <asm/domain.h>
=C2=A0#include <xen/event.h>
=C2=A0#include <xen/wait.h>
=C2=A0#include <asm/p2m.h>
=C2=A0#include <xen/mem_event.h>
=C2=A0#include <xen/mem_access.h>
+
+#ifdef CONFIG_X86
=C2=A0#include <asm/mem_paging.h>
=C2=A0#include <asm/mem_sharing.h>
+#endif
+
=C2=A0#include <xsm/xsm.h>

=C2=A0/* for public/io/ring.h macros */
@@ -427,6 +430,7 @@ static void mem_access_notification(struct vcpu *v, uns= igned int port)
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0p2m_mem_access_resume(v->domain);
=C2=A0}

+#ifdef CONFIG_X86
=C2=A0/* Registered with Xen-bound event channel for incoming notifications= . */
=C2=A0static void mem_paging_notification(struct vcpu *v, unsigned int port= )
=C2=A0{
@@ -470,6 +474,7 @@ int do_mem_event_op(int op, uint32_t domain, void *arg)=
=C2=A0 =C2=A0 =C2=A0rcu_unlock_domain(d);
=C2=A0 =C2=A0 =C2=A0return ret;
=C2=A0}
+#endif

=C2=A0/* Clean up on domain destruction */
=C2=A0void mem_event_cleanup(struct domain *d)
@@ -538,6 +543,8 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_e= vent_op_t *mec,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0{
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABL= E:
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0{
+
+#ifdef CONFIG_X86
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0rc =3D -ENODEV;
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0/* Only HAP is supported */=
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0if ( !hap_enabled(d) )
@@ -546,6 +553,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_e= vent_op_t *mec,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0/* Currently only EPT is su= pported */
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0if ( !cpu_has_vmx )
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0break;
+#endif

=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0rc =3D mem_event_enable(d, = mec, med, _VPF_mem_access,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0HVM_PARAM_ACCESS= _RING_PFN,
@@ -567,6 +575,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_e= vent_op_t *mec,
=C2=A0 =C2=A0 =C2=A0}
=C2=A0 =C2=A0 =C2=A0break;

+#ifdef CONFIG_X86
=C2=A0 =C2=A0 =C2=A0case XEN_DOMCTL_MEM_EVENT_OP_PAGING:
=C2=A0 =C2=A0 =C2=A0{
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0struct mem_event_domain *med =3D &d-&= gt;mem_event->paging;
@@ -656,6 +665,7 @@ int mem_event_domctl(struct domain *d, xen_domctl_mem_e= vent_op_t *mec,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0}
=C2=A0 =C2=A0 =C2=A0}
=C2=A0 =C2=A0 =C2=A0break;
+#endif

=C2=A0 =C2=A0 =C2=A0default:
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0rc =3D -ENOSYS;
@@ -695,7 +705,6 @@ void mem_event_vcpu_unpause(struct vcpu *v)

=C2=A0 =C2=A0 =C2=A0vcpu_unpause(v);
=C2=A0}
-#endif

=C2=A0/*
=C2=A0 * Local variables:
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 06c93a0..f3d1f33 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -2,9 +2,55 @@
=C2=A0#define _XEN_P2M_H

=C2=A0#include <xen/mm.h>
+#include <public/memory.h>
+#include <public/mem_event.h>

=C2=A0struct domain;

+/* List of possible type for each page in the p2m entry.
+ * The number of available bit per page in the pte for this purpose is 4 b= its.
+ * So it's possible to only have 16 fields. If we run out of value in = the
+ * future, it's possible to use higher value for pseudo-type and don&#= 39;t store
+ * them in the p2m entry.
+ */
+typedef enum {
+=C2=A0 =C2=A0 p2m_invalid =3D 0,=C2=A0 =C2=A0 /* Nothing mapped here */ +=C2=A0 =C2=A0 p2m_ram_rw,=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0/* Normal read/= write guest RAM */
+=C2=A0 =C2=A0 p2m_ram_ro,=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0/* Read-only; w= rites are silently dropped */
+=C2=A0 =C2=A0 p2m_mmio_direct,=C2=A0 =C2=A0 /* Read/write mapping of genui= ne MMIO area */
+=C2=A0 =C2=A0 p2m_map_foreign,=C2=A0 =C2=A0 /* Ram pages from foreign doma= in */
+=C2=A0 =C2=A0 p2m_grant_map_rw,=C2=A0 =C2=A0/* Read/write grant mapping */=
+=C2=A0 =C2=A0 p2m_grant_map_ro,=C2=A0 =C2=A0/* Read-only grant mapping */<= br> +=C2=A0 =C2=A0 /* The types below are only used to decide the page attribut= e in the P2M */
+=C2=A0 =C2=A0 p2m_iommu_map_rw,=C2=A0 =C2=A0/* Read/write iommu mapping */=
+=C2=A0 =C2=A0 p2m_iommu_map_ro,=C2=A0 =C2=A0/* Read-only iommu mapping */<= br> +=C2=A0 =C2=A0 p2m_max_real_type,=C2=A0 /* Types after this won't be st= ore in the p2m */
+} p2m_type_t;
+
+/*
+ * Additional access types, which are used to further restrict
+ * the permissions given by the p2m_type_t memory type.=C2=A0 Violations + * caused by p2m_access_t restrictions are sent to the mem_event
+ * interface.
+ *
+ * The access permissions are soft state: when any ambigious change of pag= e
+ * type or use occurs, or when pages are flushed, swapped, or at any other=
+ * convenient type, the access permissions can get reset to the p2m_domain=
+ * default.
+ */
+typedef enum {
+=C2=A0 =C2=A0 p2m_access_n=C2=A0 =C2=A0 =C2=A0=3D 0, /* No access permissi= ons allowed */
+=C2=A0 =C2=A0 p2m_access_r=C2=A0 =C2=A0 =C2=A0=3D 1,
+=C2=A0 =C2=A0 p2m_access_w=C2=A0 =C2=A0 =C2=A0=3D 2,
+=C2=A0 =C2=A0 p2m_access_rw=C2=A0 =C2=A0 =3D 3,
+=C2=A0 =C2=A0 p2m_access_x=C2=A0 =C2=A0 =C2=A0=3D 4,
+=C2=A0 =C2=A0 p2m_access_rx=C2=A0 =C2=A0 =3D 5,
+=C2=A0 =C2=A0 p2m_access_wx=C2=A0 =C2=A0 =3D 6,
+=C2=A0 =C2=A0 p2m_access_rwx=C2=A0 =C2=A0=3D 7
+
+=C2=A0 =C2=A0 /* NOTE: Assumed to be only 4 bits right now */
+} p2m_access_t;
+
=C2=A0/* Per-p2m-table state */
=C2=A0struct p2m_domain {
=C2=A0 =C2=A0 =C2=A0/* Lock that protects updates to the p2m */
@@ -38,27 +84,17 @@ struct p2m_domain {
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 * at each p2m tree level. */
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0unsigned long shattered[4];
=C2=A0 =C2=A0 =C2=A0} stats;
-};

-/* List of possible type for each page in the p2m entry.
- * The number of available bit per page in the pte for this purpose is 4 b= its.
- * So it's possible to only have 16 fields. If we run out of value in = the
- * future, it's possible to use higher value for pseudo-type and don&#= 39;t store
- * them in the p2m entry.
- */
-typedef enum {
-=C2=A0 =C2=A0 p2m_invalid =3D 0,=C2=A0 =C2=A0 /* Nothing mapped here */ -=C2=A0 =C2=A0 p2m_ram_rw,=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0/* Normal read/= write guest RAM */
-=C2=A0 =C2=A0 p2m_ram_ro,=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0/* Read-only; w= rites are silently dropped */
-=C2=A0 =C2=A0 p2m_mmio_direct,=C2=A0 =C2=A0 /* Read/write mapping of genui= ne MMIO area */
-=C2=A0 =C2=A0 p2m_map_foreign,=C2=A0 =C2=A0 /* Ram pages from foreign doma= in */
-=C2=A0 =C2=A0 p2m_grant_map_rw,=C2=A0 =C2=A0/* Read/write grant mapping */=
-=C2=A0 =C2=A0 p2m_grant_map_ro,=C2=A0 =C2=A0/* Read-only grant mapping */<= br> -=C2=A0 =C2=A0 /* The types below are only used to decide the page attribut= e in the P2M */
-=C2=A0 =C2=A0 p2m_iommu_map_rw,=C2=A0 =C2=A0/* Read/write iommu mapping */=
-=C2=A0 =C2=A0 p2m_iommu_map_ro,=C2=A0 =C2=A0/* Read-only iommu mapping */<= br> -=C2=A0 =C2=A0 p2m_max_real_type,=C2=A0 /* Types after this won't be st= ore in the p2m */
-} p2m_type_t;
+=C2=A0 =C2=A0 /* Default P2M access type for each page in the the domain: = new pages,
+=C2=A0 =C2=A0 =C2=A0* swapped in pages, cleared pages, and pages that are = ambiquously
+=C2=A0 =C2=A0 =C2=A0* retyped get this access type.=C2=A0 See definition o= f p2m_access_t. */
+=C2=A0 =C2=A0 p2m_access_t default_access;
+
+=C2=A0 =C2=A0 /* If true, and an access fault comes in and there is no mem= _event listener,
+=C2=A0 =C2=A0 =C2=A0* pause domain.=C2=A0 Otherwise, remove access restric= tions. */
+=C2=A0 =C2=A0 bool_t=C2=A0 =C2=A0 =C2=A0 =C2=A0access_required;
+
+};

=C2=A0#define p2m_is_foreign(_t)=C2=A0 ((_t) =3D=3D p2m_map_foreign)
=C2=A0#define p2m_is_ram(_t)=C2=A0 =C2=A0 =C2=A0 ((_t) =3D=3D p2m_ram_rw ||= (_t) =3D=3D p2m_ram_ro)
@@ -195,6 +231,30 @@ static inline int get_page_and_type(struct page_info *= page,
=C2=A0 =C2=A0 =C2=A0return rc;
=C2=A0}

+/* get host p2m table */
+#define p2m_get_hostp2m(d)=C2=A0 =C2=A0 =C2=A0 (&((d)->arch.p2m)) +
+/* Resumes the running of the VCPU, restarting the last instruction */
+static inline void p2m_mem_access_resume(struct domain *d) {}
+
+/* Set access type for a region of pfns.
+ * If start_pfn =3D=3D -1ul, sets the default access type */
+static inline
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_= t nr,
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 =C2=A0 uint32_t start, uint32_t mask, xenmem_access_t access)
+{
+=C2=A0 =C2=A0 return -ENOSYS;
+}
+
+/* Get access type for a pfn
+ * If pfn =3D=3D -1ul, gets the default access type */
+static inline
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 =C2=A0xenmem_access_t *access)
+{
+=C2=A0 =C2=A0 return -ENOSYS;
+}
+
=C2=A0#endif /* _XEN_P2M_H */

=C2=A0/*
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h index ded5441..5c7c5fd 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -23,29 +23,10 @@
=C2=A0#ifndef _XEN_ASM_MEM_ACCESS_H
=C2=A0#define _XEN_ASM_MEM_ACCESS_H

-#ifdef CONFIG_X86
-
=C2=A0int mem_access_memop(unsigned long cmd,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
=C2=A0int mem_access_send_req(struct domain *d, mem_event_request_t *req);<= br>
-#else
-
-static inline
-int mem_access_memop(unsigned long cmd,
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
-{
-=C2=A0 =C2=A0 return -ENOSYS;
-}
-
-static inline
-int mem_access_send_req(struct domain *d, mem_event_request_t *req)
-{
-=C2=A0 =C2=A0 return -ENOSYS;
-}
-
-#endif /* CONFIG_X86 */
-
=C2=A0#endif /* _XEN_ASM_MEM_ACCESS_H */

=C2=A0/*
diff --git a/xen/include/xen/mem_event.h b/xen/include/xen/mem_event.h
index a28d453..e2a9d4d 100644
--- a/xen/include/xen/mem_event.h
+++ b/xen/include/xen/mem_event.h
@@ -24,8 +24,6 @@
=C2=A0#ifndef __MEM_EVENT_H__
=C2=A0#define __MEM_EVENT_H__

-#ifdef CONFIG_X86
-
=C2=A0/* Clean up on domain destruction */
=C2=A0void mem_event_cleanup(struct domain *d);

@@ -67,66 +65,25 @@ void mem_event_put_request(struct domain *d, struct mem= _event_domain *med,
=C2=A0int mem_event_get_response(struct domain *d, struct mem_event_domain = *med,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 =C2=A0 =C2=A0 =C2=A0 mem_event_response_t *rsp);

-int do_mem_event_op(int op, uint32_t domain, void *arg);
=C2=A0int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec= ,
=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 XEN_GUEST_HANDLE_PARAM(void) u_domctl);

=C2=A0void mem_event_vcpu_pause(struct vcpu *v);
=C2=A0void mem_event_vcpu_unpause(struct vcpu *v);

-#else
-
-static inline void mem_event_cleanup(struct domain *d) {}
-
-static inline bool_t mem_event_check_ring(struct mem_event_domain *med) -{
-=C2=A0 =C2=A0 return 0;
-}
-
-static inline int mem_event_claim_slot(struct domain *d,
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 struct m= em_event_domain *med)
-{
-=C2=A0 =C2=A0 return -ENOSYS;
-}
-
-static inline int mem_event_claim_slot_nosleep(struct domain *d,
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 struct m= em_event_domain *med)
-{
-=C2=A0 =C2=A0 return -ENOSYS;
-}
-
-static inline
-void mem_event_cancel_slot(struct domain *d, struct mem_event_domain *med)=
-{}
-
-static inline
-void mem_event_put_request(struct domain *d, struct mem_event_domain *med,=
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 =C2=A0 =C2=A0 =C2=A0 mem_event_request_t *req)
-{}
+#ifdef CONFIG_X86

-static inline
-int mem_event_get_response(struct domain *d, struct mem_event_domain *med,=
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0 =C2=A0 =C2=A0 =C2=A0mem_event_response_t *rsp)
-{
-=C2=A0 =C2=A0 return -ENOSYS;
-}
+int do_mem_event_op(int op, uint32_t domain, void *arg);

-static inline int do_mem_event_op(int op, uint32_t domain, void *arg)
-{
-=C2=A0 =C2=A0 return -ENOSYS;
-}
+#else

=C2=A0static inline
-int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec,
-=C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2=A0 =C2= =A0XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+int do_mem_event_op(int op, uint32_t domain, void *arg)
=C2=A0{
=C2=A0 =C2=A0 =C2=A0return -ENOSYS;
=C2=A0}

-static inline void mem_event_vcpu_pause(struct vcpu *v) {}
-static inline void mem_event_vcpu_unpause(struct vcpu *v) {}
-
-#endif /* CONFIG_X86 */
+#endif

=C2=A0#endif /* __MEM_EVENT_H__ */

diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 4575dda..2365fad 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -1,4 +1,3 @@
-
=C2=A0#ifndef __SCHED_H__
=C2=A0#define __SCHED_H__

diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index c5aa316..61677ea 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -507,6 +507,18 @@ static XSM_INLINE int xsm_hvm_param_nested(XSM_DEFAULT= _ARG struct domain *d)
=C2=A0 =C2=A0 =C2=A0return xsm_default_action(action, current->domain, d= );
=C2=A0}

+static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain = *d, int mode, int op)
+{
+=C2=A0 =C2=A0 XSM_ASSERT_ACTION(XSM_PRIV);
+=C2=A0 =C2=A0 return xsm_default_action(action, current->domain, d); +}
+
+static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, i= nt op)
+{
+=C2=A0 =C2=A0 XSM_ASSERT_ACTION(XSM_DM_PRIV);
+=C2=A0 =C2=A0 return xsm_default_action(action, current->domain, d); +}
+
=C2=A0#ifdef CONFIG_X86
=C2=A0static XSM_INLINE int xsm_do_mca(XSM_DEFAULT_VOID)
=C2=A0{
@@ -550,18 +562,6 @@ static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT= _ARG struct domain *d, int
=C2=A0 =C2=A0 =C2=A0return xsm_default_action(action, current->domain, d= );
=C2=A0}

-static XSM_INLINE int xsm_mem_event_control(XSM_DEFAULT_ARG struct domain = *d, int mode, int op)
-{
-=C2=A0 =C2=A0 XSM_ASSERT_ACTION(XSM_PRIV);
-=C2=A0 =C2=A0 return xsm_default_action(action, current->domain, d); -}
-
-static XSM_INLINE int xsm_mem_event_op(XSM_DEFAULT_ARG struct domain *d, i= nt op)
-{
-=C2=A0 =C2=A0 XSM_ASSERT_ACTION(XSM_DM_PRIV);
-=C2=A0 =C2=A0 return xsm_default_action(action, current->domain, d); -}
-
=C2=A0static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domai= n *d, struct domain *cd, int op)
=C2=A0{
=C2=A0 =C2=A0 =C2=A0XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index a85045d..64289cd 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -140,6 +140,9 @@ struct xsm_operations {
=C2=A0 =C2=A0 =C2=A0int (*hvm_control) (struct domain *d, unsigned long op)= ;
=C2=A0 =C2=A0 =C2=A0int (*hvm_param_nested) (struct domain *d);

+=C2=A0 =C2=A0 int (*mem_event_control) (struct domain *d, int mode, int op= );
+=C2=A0 =C2=A0 int (*mem_event_op) (struct domain *d, int op);
+
=C2=A0#ifdef CONFIG_X86
=C2=A0 =C2=A0 =C2=A0int (*do_mca) (void);
=C2=A0 =C2=A0 =C2=A0int (*shadow_control) (struct domain *d, uint32_t op);<= br> @@ -148,8 +151,6 @@ struct xsm_operations {
=C2=A0 =C2=A0 =C2=A0int (*hvm_set_pci_link_route) (struct domain *d);
=C2=A0 =C2=A0 =C2=A0int (*hvm_inject_msi) (struct domain *d);
=C2=A0 =C2=A0 =C2=A0int (*hvm_ioreq_server) (struct domain *d, int op);
-=C2=A0 =C2=A0 int (*mem_event_control) (struct domain *d, int mode, int op= );
-=C2=A0 =C2=A0 int (*mem_event_op) (struct domain *d, int op);
=C2=A0 =C2=A0 =C2=A0int (*mem_sharing_op) (struct domain *d, struct domain = *cd, int op);
=C2=A0 =C2=A0 =C2=A0int (*apic) (struct domain *d, int cmd);
=C2=A0 =C2=A0 =C2=A0int (*memtype) (uint32_t access);
@@ -534,6 +535,16 @@ static inline int xsm_hvm_param_nested (xsm_default_t = def, struct domain *d)
=C2=A0 =C2=A0 =C2=A0return xsm_ops->hvm_param_nested(d);
=C2=A0}

+static inline int xsm_mem_event_control (xsm_default_t def, struct domain = *d, int mode, int op)
+{
+=C2=A0 =C2=A0 return xsm_ops->mem_event_control(d, mode, op);
+}
+
+static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, i= nt op)
+{
+=C2=A0 =C2=A0 return xsm_ops->mem_event_op(d, op);
+}
+
=C2=A0#ifdef CONFIG_X86
=C2=A0static inline int xsm_do_mca(xsm_default_t def)
=C2=A0{
@@ -570,16 +581,6 @@ static inline int xsm_hvm_ioreq_server (xsm_default_t = def, struct domain *d, int
=C2=A0 =C2=A0 =C2=A0return xsm_ops->hvm_ioreq_server(d, op);
=C2=A0}

-static inline int xsm_mem_event_control (xsm_default_t def, struct domain = *d, int mode, int op)
-{
-=C2=A0 =C2=A0 return xsm_ops->mem_event_control(d, mode, op);
-}
-
-static inline int xsm_mem_event_op (xsm_default_t def, struct domain *d, i= nt op)
-{
-=C2=A0 =C2=A0 return xsm_ops->mem_event_op(d, op);
-}
-
=C2=A0static inline int xsm_mem_sharing_op (xsm_default_t def, struct domai= n *d, struct domain *cd, int op)
=C2=A0{
=C2=A0 =C2=A0 =C2=A0return xsm_ops->mem_sharing_op(d, cd, op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index c95c803..9df9d81 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -116,6 +116,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, add_to_physmap);
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, remove_from_physmap);
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, map_gmfn_foreign);
+=C2=A0 =C2=A0 set_to_dummy_if_null(ops, mem_event_control);
+=C2=A0 =C2=A0 set_to_dummy_if_null(ops, mem_event_op);

=C2=A0#ifdef CONFIG_X86
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, do_mca);
@@ -125,8 +127,6 @@ void xsm_fixup_ops (struct xsm_operations *ops)
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, hvm_set_pci_link_route);
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, hvm_inject_msi);
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, hvm_ioreq_server);
-=C2=A0 =C2=A0 set_to_dummy_if_null(ops, mem_event_control);
-=C2=A0 =C2=A0 set_to_dummy_if_null(ops, mem_event_op);
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, mem_sharing_op);
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, apic);
=C2=A0 =C2=A0 =C2=A0set_to_dummy_if_null(ops, platform_op);
--
2.0.1


--089e010d8406ddcd3605017779e2-- --===============6240843452304135892== Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Content-Disposition: inline _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel --===============6240843452304135892==--