From: George Dunlap <george.dunlap@eu.citrix.com>
To: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Cc: "Keir (Xen.org)" <keir@xen.org>,
"xen-devel@lists.xen.org" <xen-devel@lists.xen.org>
Subject: Re: [PATCH 20/22] xen/xsm: distinguish scheduler get/set operations
Date: Tue, 15 Jan 2013 16:50:11 +0000
Message-ID: <50F588C3.5090402@eu.citrix.com>
In-Reply-To: <1357832026-27659-21-git-send-email-dgdegra@tycho.nsa.gov>
On 10/01/13 15:33, Daniel De Graaf wrote:
> Add getscheduler and setscheduler permissions to replace the monolithic
> scheduler permission in the scheduler_op domctl and sysctl.
>
> Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> Cc: Keir Fraser <keir@xen.org>
I haven't reviewed the code in detail, but in principle this sounds like a
great idea:
Acked-by: George Dunlap <george.dunlap@eu.citrix.com>
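
For anyone updating a policy for the new split, here is a minimal sketch of
what it enables (the monitor_t label below is purely illustrative, not
something this patch defines): a domain that may query per-domain scheduler
parameters but is denied when it tries to change them.

  # Illustrative fragment; monitor_t is a hypothetical label.
  # Read-only access to per-domain scheduler parameters of domU_t guests:
  allow monitor_t domU_t:domain { getdomaininfo getscheduler };
  # domain2:setscheduler is deliberately not granted, so
  # XEN_DOMCTL_SCHEDOP_putinfo is refused by flask_domctl_scheduler_op().

That kind of get-only access couldn't be expressed with the old monolithic
"scheduler" permission.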
> ---
> tools/flask/policy/policy/modules/xen/xen.if | 6 ++--
> tools/flask/policy/policy/modules/xen/xen.te | 4 +--
> xen/common/schedule.c | 10 ++++++-
> xen/include/xsm/dummy.h | 12 ++++++++
> xen/include/xsm/xsm.h | 12 ++++++++
> xen/xsm/dummy.c | 2 ++
> xen/xsm/flask/hooks.c | 44 +++++++++++++++++++++++-----
> xen/xsm/flask/policy/access_vectors | 7 +++--
> 8 files changed, 81 insertions(+), 16 deletions(-)
>
> diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if
> index d9d5344..2ce2212 100644
> --- a/tools/flask/policy/policy/modules/xen/xen.if
> +++ b/tools/flask/policy/policy/modules/xen/xen.if
> @@ -47,9 +47,9 @@ define(`declare_build_label', `
> define(`create_domain_common', `
> allow $1 $2:domain { create max_vcpus setdomainmaxmem setaddrsize
> getdomaininfo hypercall setvcpucontext setextvcpucontext
> - scheduler getvcpuinfo getvcpuextstate getaddrsize
> + getscheduler getvcpuinfo getvcpuextstate getaddrsize
> getvcpuaffinity setvcpuaffinity };
> - allow $1 $2:domain2 { set_cpuid settsc };
> + allow $1 $2:domain2 { set_cpuid settsc setscheduler };
> allow $1 $2:security check_context;
> allow $1 $2:shadow enable;
> allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
> @@ -79,7 +79,7 @@ define(`create_domain_build_label', `
> define(`manage_domain', `
> allow $1 $2:domain { getdomaininfo getvcpuinfo getvcpuaffinity
> getaddrsize pause unpause trigger shutdown destroy
> - setvcpuaffinity setdomainmaxmem };
> + setvcpuaffinity setdomainmaxmem getscheduler };
> ')
>
> # migrate_domain_out(priv, target)
> diff --git a/tools/flask/policy/policy/modules/xen/xen.te b/tools/flask/policy/policy/modules/xen/xen.te
> index c714dcb..955fd8b 100644
> --- a/tools/flask/policy/policy/modules/xen/xen.te
> +++ b/tools/flask/policy/policy/modules/xen/xen.te
> @@ -55,8 +55,8 @@ type device_t, resource_type;
> #
> ################################################################################
> allow dom0_t xen_t:xen { kexec readapic writeapic mtrr_read mtrr_add mtrr_del
> - scheduler physinfo heap quirk readconsole writeconsole settime getcpuinfo
> - microcode cpupool_op sched_op pm_op tmem_control };
> + physinfo heap quirk readconsole writeconsole settime getcpuinfo
> + microcode cpupool_op pm_op tmem_control getscheduler setscheduler };
> allow dom0_t xen_t:mmu { memorymap };
> allow dom0_t security_t:security { check_context compute_av compute_create
> compute_member load_policy compute_relabel compute_user setenforce
> diff --git a/xen/common/schedule.c b/xen/common/schedule.c
> index 903f32d..f3fc6bc 100644
> --- a/xen/common/schedule.c
> +++ b/xen/common/schedule.c
> @@ -1006,7 +1006,11 @@ int sched_id(void)
> long sched_adjust(struct domain *d, struct xen_domctl_scheduler_op *op)
> {
> long ret;
> -
> +
> + ret = xsm_domctl_scheduler_op(XSM_HOOK, d, op->cmd);
> + if ( ret )
> + return ret;
> +
> if ( (op->sched_id != DOM2OP(d)->sched_id) ||
> ((op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
> (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo)) )
> @@ -1025,6 +1029,10 @@ long sched_adjust_global(struct xen_sysctl_scheduler_op *op)
> struct cpupool *pool;
> int rc;
>
> + rc = xsm_sysctl_scheduler_op(XSM_HOOK, op->cmd);
> + if ( rc )
> + return rc;
> +
> if ( (op->cmd != XEN_DOMCTL_SCHEDOP_putinfo) &&
> (op->cmd != XEN_DOMCTL_SCHEDOP_getinfo) )
> return -EINVAL;
> diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
> index 2c750de..18f36b2 100644
> --- a/xen/include/xsm/dummy.h
> +++ b/xen/include/xsm/dummy.h
> @@ -95,6 +95,18 @@ static XSM_INLINE int xsm_getdomaininfo(XSM_DEFAULT_ARG struct domain *d)
> return xsm_default_action(action, current->domain, d);
> }
>
> +static XSM_INLINE int xsm_domctl_scheduler_op(XSM_DEFAULT_ARG struct domain *d, int cmd)
> +{
> + XSM_ASSERT_ACTION(XSM_HOOK);
> + return xsm_default_action(action, current->domain, d);
> +}
> +
> +static XSM_INLINE int xsm_sysctl_scheduler_op(XSM_DEFAULT_ARG int cmd)
> +{
> + XSM_ASSERT_ACTION(XSM_HOOK);
> + return xsm_default_action(action, current->domain, NULL);
> +}
> +
> static XSM_INLINE int xsm_set_target(XSM_DEFAULT_ARG struct domain *d, struct domain *e)
> {
> XSM_ASSERT_ACTION(XSM_HOOK);
> diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
> index ce5ede8..8947372 100644
> --- a/xen/include/xsm/xsm.h
> +++ b/xen/include/xsm/xsm.h
> @@ -55,6 +55,8 @@ struct xsm_operations {
> struct xen_domctl_getdomaininfo *info);
> int (*domain_create) (struct domain *d, u32 ssidref);
> int (*getdomaininfo) (struct domain *d);
> + int (*domctl_scheduler_op) (struct domain *d, int op);
> + int (*sysctl_scheduler_op) (int op);
> int (*set_target) (struct domain *d, struct domain *e);
> int (*domctl) (struct domain *d, int cmd);
> int (*sysctl) (int cmd);
> @@ -177,6 +179,16 @@ static inline int xsm_getdomaininfo (xsm_default_t def, struct domain *d)
> return xsm_ops->getdomaininfo(d);
> }
>
> +static inline int xsm_domctl_scheduler_op (xsm_default_t def, struct domain *d, int cmd)
> +{
> + return xsm_ops->domctl_scheduler_op(d, cmd);
> +}
> +
> +static inline int xsm_sysctl_scheduler_op (xsm_default_t def, int cmd)
> +{
> + return xsm_ops->sysctl_scheduler_op(cmd);
> +}
> +
> static inline int xsm_set_target (xsm_default_t def, struct domain *d, struct domain *e)
> {
> return xsm_ops->set_target(d, e);
> diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
> index 22c66e5..529a724 100644
> --- a/xen/xsm/dummy.c
> +++ b/xen/xsm/dummy.c
> @@ -32,6 +32,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
> set_to_dummy_if_null(ops, security_domaininfo);
> set_to_dummy_if_null(ops, domain_create);
> set_to_dummy_if_null(ops, getdomaininfo);
> + set_to_dummy_if_null(ops, domctl_scheduler_op);
> + set_to_dummy_if_null(ops, sysctl_scheduler_op);
> set_to_dummy_if_null(ops, set_target);
> set_to_dummy_if_null(ops, domctl);
> set_to_dummy_if_null(ops, sysctl);
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index 222ab3e..ba67502 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -517,6 +517,38 @@ static int flask_getdomaininfo(struct domain *d)
> return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__GETDOMAININFO);
> }
>
> +static int flask_domctl_scheduler_op(struct domain *d, int op)
> +{
> + switch ( op )
> + {
> + case XEN_DOMCTL_SCHEDOP_putinfo:
> + return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SETSCHEDULER);
> +
> + case XEN_DOMCTL_SCHEDOP_getinfo:
> + return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__GETSCHEDULER);
> +
> + default:
> + printk("flask_domctl_scheduler_op: Unknown op %d\n", op);
> + return -EPERM;
> + }
> +}
> +
> +static int flask_sysctl_scheduler_op(int op)
> +{
> + switch ( op )
> + {
> + case XEN_DOMCTL_SCHEDOP_putinfo:
> + return domain_has_xen(current->domain, XEN__SETSCHEDULER);
> +
> + case XEN_DOMCTL_SCHEDOP_getinfo:
> + return domain_has_xen(current->domain, XEN__GETSCHEDULER);
> +
> + default:
> + printk("flask_domctl_scheduler_op: Unknown op %d\n", op);
> + return -EPERM;
> + }
> +}
> +
> static int flask_set_target(struct domain *d, struct domain *t)
> {
> int rc;
> @@ -548,6 +580,7 @@ static int flask_domctl(struct domain *d, int cmd)
> /* These have individual XSM hooks (common/domctl.c) */
> case XEN_DOMCTL_createdomain:
> case XEN_DOMCTL_getdomaininfo:
> + case XEN_DOMCTL_scheduler_op:
> case XEN_DOMCTL_irq_permission:
> case XEN_DOMCTL_iomem_permission:
> case XEN_DOMCTL_set_target:
> @@ -586,9 +619,6 @@ static int flask_domctl(struct domain *d, int cmd)
> case XEN_DOMCTL_resumedomain:
> return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__RESUME);
>
> - case XEN_DOMCTL_scheduler_op:
> - return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__SCHEDULER);
> -
> case XEN_DOMCTL_max_vcpus:
> return current_has_perm(d, SECCLASS_DOMAIN, DOMAIN__MAX_VCPUS);
>
> @@ -704,6 +734,7 @@ static int flask_sysctl(int cmd)
> case XEN_SYSCTL_readconsole:
> case XEN_SYSCTL_getdomaininfolist:
> case XEN_SYSCTL_page_offline_op:
> + case XEN_SYSCTL_scheduler_op:
> #ifdef CONFIG_X86
> case XEN_SYSCTL_cpu_hotplug:
> #endif
> @@ -713,7 +744,7 @@ static int flask_sysctl(int cmd)
> return domain_has_xen(current->domain, XEN__TBUFCONTROL);
>
> case XEN_SYSCTL_sched_id:
> - return domain_has_xen(current->domain, XEN__SCHEDULER);
> + return domain_has_xen(current->domain, XEN__GETSCHEDULER);
>
> case XEN_SYSCTL_perfc_op:
> return domain_has_xen(current->domain, XEN__PERFCONTROL);
> @@ -739,9 +770,6 @@ static int flask_sysctl(int cmd)
> case XEN_SYSCTL_cpupool_op:
> return domain_has_xen(current->domain, XEN__CPUPOOL_OP);
>
> - case XEN_SYSCTL_scheduler_op:
> - return domain_has_xen(current->domain, XEN__SCHED_OP);
> -
> case XEN_SYSCTL_physinfo:
> case XEN_SYSCTL_topologyinfo:
> case XEN_SYSCTL_numainfo:
> @@ -1408,6 +1436,8 @@ static struct xsm_operations flask_ops = {
> .security_domaininfo = flask_security_domaininfo,
> .domain_create = flask_domain_create,
> .getdomaininfo = flask_getdomaininfo,
> + .domctl_scheduler_op = flask_domctl_scheduler_op,
> + .sysctl_scheduler_op = flask_sysctl_scheduler_op,
> .set_target = flask_set_target,
> .domctl = flask_domctl,
> .sysctl = flask_sysctl,
> diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
> index 7a7e253..b982cf5 100644
> --- a/xen/xsm/flask/policy/access_vectors
> +++ b/xen/xsm/flask/policy/access_vectors
> @@ -5,7 +5,6 @@
>
> class xen
> {
> - scheduler
> settime
> tbufcontrol
> readconsole
> @@ -34,9 +33,10 @@ class xen
> mca_op
> lockprof
> cpupool_op
> - sched_op
> tmem_op
> tmem_control
> + getscheduler
> + setscheduler
> }
>
> class domain
> @@ -51,7 +51,7 @@ class domain
> destroy
> setvcpuaffinity
> getvcpuaffinity
> - scheduler
> + getscheduler
> getdomaininfo
> getvcpuinfo
> getvcpucontext
> @@ -85,6 +85,7 @@ class domain2
> set_cpuid
> gettsc
> settsc
> + setscheduler
> }
>
> class hvm
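
The sysctl side gets the same benefit for the global/cpupool parameters; a
sketch along the same lines (tools_t again being a hypothetical label):

  # Illustrative: tools_t may read global scheduler parameters
  # (XEN_SYSCTL_scheduler_op getinfo, and XEN_SYSCTL_sched_id), but
  # putinfo fails because xen:setscheduler is not granted.
  allow tools_t xen_t:xen { getscheduler };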