[PATCH 22/23] tmem: add XSM hooks
From: Daniel De Graaf @ 2012-09-17 15:23 UTC
To: xen-devel
Cc: dgdegra, keir, Dan Magenheimer
This adds a pair of XSM hooks for tmem operations: xsm_tmem_op, which
controls any use of tmem, and xsm_tmem_control, which gates the
TMEM_CONTROL operations. By default, all domains can use tmem, while
only IS_PRIV domains can use the control operations.
Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
---
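Note for reviewers (not part of the patch): with these hooks in place,
do_tmem_op() first passes through tmh_current_permitted() ->
xsm_tmem_op(), and the TMEM_CONTROL subops additionally pass through
tmh_current_is_privileged() -> xsm_tmem_control(). As a rough sketch of
how a FLASK policy might grant the new vectors (the domain types below
are hypothetical examples, not shipped policy):

    # let ordinary guests use tmem
    allow domU_t xen_t:xen tmem_op;
    # let a management domain issue TMEM_CONTROL as well
    allow tmem_admin_t xen_t:xen { tmem_op tmem_control };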
 tools/flask/policy/policy/flask/access_vectors |  2 ++
 xen/common/tmem.c                              |  3 +++
 xen/include/xen/tmem_xen.h                     |  8 +++++++-
 xen/include/xsm/dummy.h                        | 12 ++++++++++++
 xen/include/xsm/xsm.h                          | 12 ++++++++++++
 xen/xsm/dummy.c                                |  2 ++
 xen/xsm/flask/hooks.c                          | 12 ++++++++++++
 xen/xsm/flask/include/av_perm_to_string.h      |  2 ++
 xen/xsm/flask/include/av_permissions.h         |  2 ++
 9 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/tools/flask/policy/policy/flask/access_vectors b/tools/flask/policy/policy/flask/access_vectors
index caf65d2..7a7e253 100644
--- a/tools/flask/policy/policy/flask/access_vectors
+++ b/tools/flask/policy/policy/flask/access_vectors
@@ -35,6 +35,8 @@ class xen
     lockprof
     cpupool_op
     sched_op
+    tmem_op
+    tmem_control
 }

 class domain
diff --git a/xen/common/tmem.c b/xen/common/tmem.c
index f4812b9..6d95296 100644
--- a/xen/common/tmem.c
+++ b/xen/common/tmem.c
@@ -2636,6 +2636,9 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops)
     if ( !tmem_initialized )
         return -ENODEV;

+    if ( !tmh_current_permitted() )
+        return -EPERM;
+
     total_tmem_ops++;

     if ( tmh_lock_all )
diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h
index 9492810..ae550af 100644
--- a/xen/include/xen/tmem_xen.h
+++ b/xen/include/xen/tmem_xen.h
@@ -16,6 +16,7 @@
 #include <xen/guest_access.h> /* copy_from_guest */
 #include <xen/hash.h> /* hash_long */
 #include <xen/domain_page.h> /* __map_domain_page */
+#include <xsm/xsm.h> /* xsm_tmem_control */
 #include <public/tmem.h>
 #ifdef CONFIG_COMPAT
 #include <compat/tmem.h>
@@ -326,9 +327,14 @@ static inline bool_t tmh_set_client_from_id(
     return rc;
 }

+static inline bool_t tmh_current_permitted(void)
+{
+    return !xsm_tmem_op();
+}
+
 static inline bool_t tmh_current_is_privileged(void)
 {
-    return IS_PRIV(current->domain);
+    return !xsm_tmem_control();
 }

 static inline uint8_t tmh_get_first_byte(pfp_t *pfp)
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index 331c423..277470b 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -443,6 +443,18 @@ static XSM_DEFAULT(int, sched_op) (void)
     return 0;
 }

+static XSM_DEFAULT(int, tmem_op) (void)
+{
+    return 0;
+}
+
+static XSM_DEFAULT(int, tmem_control) (void)
+{
+    if ( !IS_PRIV(current->domain) )
+        return -EPERM;
+    return 0;
+}
+
 static XSM_DEFAULT(long, do_xsm_op)(XEN_GUEST_HANDLE(xsm_op_t) op)
 {
     return -ENOSYS;
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index db4902d..8eb3775 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -134,6 +134,8 @@ struct xsm_operations {
     int (*lockprof)(void);
     int (*cpupool_op)(void);
     int (*sched_op)(void);
+    int (*tmem_op)(void);
+    int (*tmem_control)(void);

     long (*do_xsm_op) (XEN_GUEST_HANDLE(xsm_op_t) op);
@@ -610,6 +612,16 @@ static inline int xsm_sched_op(void)
     return xsm_ops->sched_op();
 }

+static inline int xsm_tmem_op(void)
+{
+    return xsm_ops->tmem_op();
+}
+
+static inline int xsm_tmem_control(void)
+{
+    return xsm_ops->tmem_control();
+}
+
 static inline long xsm_do_xsm_op (XEN_GUEST_HANDLE(xsm_op_t) op)
 {
     return xsm_ops->do_xsm_op(op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index 26e04d5..6e113fb 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -120,6 +120,8 @@ void xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, lockprof);
     set_to_dummy_if_null(ops, cpupool_op);
     set_to_dummy_if_null(ops, sched_op);
+    set_to_dummy_if_null(ops, tmem_op);
+    set_to_dummy_if_null(ops, tmem_control);

     set_to_dummy_if_null(ops, do_xsm_op);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index a0f34b9..b59e4bb 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1079,6 +1079,16 @@ static inline int flask_sched_op(void)
     return domain_has_xen(current->domain, XEN__SCHED_OP);
 }

+static inline int flask_tmem_op(void)
+{
+    return domain_has_xen(current->domain, XEN__TMEM_OP);
+}
+
+static inline int flask_tmem_control(void)
+{
+    return domain_has_xen(current->domain, XEN__TMEM_CONTROL);
+}
+
 static int flask_perfcontrol(void)
 {
     return domain_has_xen(current->domain, XEN__PERFCONTROL);
@@ -1723,6 +1733,8 @@ static struct xsm_operations flask_ops = {
     .lockprof = flask_lockprof,
     .cpupool_op = flask_cpupool_op,
     .sched_op = flask_sched_op,
+    .tmem_op = flask_tmem_op,
+    .tmem_control = flask_tmem_control,

     .do_xsm_op = do_flask_op,
diff --git a/xen/xsm/flask/include/av_perm_to_string.h b/xen/xsm/flask/include/av_perm_to_string.h
index 79d5939..c3f2370 100644
--- a/xen/xsm/flask/include/av_perm_to_string.h
+++ b/xen/xsm/flask/include/av_perm_to_string.h
@@ -29,6 +29,8 @@
    S_(SECCLASS_XEN, XEN__LOCKPROF, "lockprof")
    S_(SECCLASS_XEN, XEN__CPUPOOL_OP, "cpupool_op")
    S_(SECCLASS_XEN, XEN__SCHED_OP, "sched_op")
+   S_(SECCLASS_XEN, XEN__TMEM_OP, "tmem_op")
+   S_(SECCLASS_XEN, XEN__TMEM_CONTROL, "tmem_control")
    S_(SECCLASS_DOMAIN, DOMAIN__SETVCPUCONTEXT, "setvcpucontext")
    S_(SECCLASS_DOMAIN, DOMAIN__PAUSE, "pause")
    S_(SECCLASS_DOMAIN, DOMAIN__UNPAUSE, "unpause")
diff --git a/xen/xsm/flask/include/av_permissions.h b/xen/xsm/flask/include/av_permissions.h
index d982328..65302e8 100644
--- a/xen/xsm/flask/include/av_permissions.h
+++ b/xen/xsm/flask/include/av_permissions.h
@@ -29,6 +29,8 @@
 #define XEN__LOCKPROF          0x08000000UL
 #define XEN__CPUPOOL_OP        0x10000000UL
 #define XEN__SCHED_OP          0x20000000UL
+#define XEN__TMEM_OP           0x40000000UL
+#define XEN__TMEM_CONTROL      0x80000000UL

 #define DOMAIN__SETVCPUCONTEXT 0x00000001UL
 #define DOMAIN__PAUSE          0x00000002UL
--
1.7.11.4