From: David Vrabel <david.vrabel@citrix.com>
To: xen-devel@lists.xen.org
Cc: Keir Fraser <keir@xen.org>,
David Vrabel <david.vrabel@citrix.com>,
Jan Beulich <jbeulich@suse.com>
Subject: [PATCH 09/11] xen: Add DOMCTL to limit the number of event channels a domain may use
Date: Tue, 8 Oct 2013 13:40:43 +0100
Message-ID: <1381236045-27020-10-git-send-email-david.vrabel@citrix.com>
In-Reply-To: <1381236045-27020-1-git-send-email-david.vrabel@citrix.com>
From: David Vrabel <david.vrabel@citrix.com>
Add XEN_DOMCTL_set_max_evtchn, which may be used during domain creation to
set the maximum event channel port a domain may use. This limits the amount
of Xen resources (global mapping space and xenheap) that a domain may
consume for event channels.
A domain that does not have a limit set may use all the event channels
supported by the event channel ABI in use.
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
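For reviewers who want to exercise the new domctl before the libxc wrapper
lands, below is a minimal, illustrative sketch (not part of this patch) of
driving it from a toolstack through libxc's generic xc_domctl() interface.
The domid and limit are placeholders, and xc_domctl()/xc_interface_open()
are assumed from the existing libxc API; the dedicated
xc_domain_set_max_evtchn() helper is only added in patch 10/11.

    /*
     * Illustrative sketch only: limit a domain to event channel ports
     * 0..1023 by issuing the new XEN_DOMCTL_set_max_evtchn through
     * libxc's generic domctl wrapper.  Domain ID 1 is a placeholder.
     */
    #include <stdio.h>
    #include <xenctrl.h>

    static int limit_domain_evtchns(xc_interface *xch, domid_t domid,
                                    uint32_t max_port)
    {
        struct xen_domctl domctl = {
            .cmd = XEN_DOMCTL_set_max_evtchn,
            .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
            .domain = domid,
            .u.set_max_evtchn.max_port = max_port,
        };

        return xc_domctl(xch, &domctl);
    }

    int main(void)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);

        if ( !xch )
            return 1;

        /* Cap the domain at 1024 ports (0..1023). */
        if ( limit_domain_evtchns(xch, 1, 1023) != 0 )
            fprintf(stderr, "XEN_DOMCTL_set_max_evtchn failed\n");

        xc_interface_close(xch);
        return 0;
    }

Passing a very large max_port effectively removes the limit, since the
hypervisor clamps the value to INT_MAX, which is also the default installed
by evtchn_init() below.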
tools/flask/policy/policy/modules/xen/xen.if | 2 +-
tools/flask/policy/policy/modules/xen/xen.te | 2 +-
xen/common/domctl.c | 8 ++++++++
xen/common/event_channel.c | 7 ++++++-
xen/include/public/domctl.h | 13 +++++++++++++
xen/include/xen/sched.h | 1 +
xen/xsm/flask/hooks.c | 3 +++
xen/xsm/flask/policy/access_vectors | 2 ++
8 files changed, 35 insertions(+), 3 deletions(-)
diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if
index 97af0a8..dedc035 100644
--- a/tools/flask/policy/policy/modules/xen/xen.if
+++ b/tools/flask/policy/policy/modules/xen/xen.if
@@ -49,7 +49,7 @@ define(`create_domain_common', `
getdomaininfo hypercall setvcpucontext setextvcpucontext
getscheduler getvcpuinfo getvcpuextstate getaddrsize
getaffinity setaffinity };
- allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim };
+ allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim set_max_evtchn };
allow $1 $2:security check_context;
allow $1 $2:shadow enable;
allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op };
diff --git a/tools/flask/policy/policy/modules/xen/xen.te b/tools/flask/policy/policy/modules/xen/xen.te
index c89ce28..bb59fe8 100644
--- a/tools/flask/policy/policy/modules/xen/xen.te
+++ b/tools/flask/policy/policy/modules/xen/xen.te
@@ -76,7 +76,7 @@ allow dom0_t dom0_t:domain {
getpodtarget setpodtarget set_misc_info set_virq_handler
};
allow dom0_t dom0_t:domain2 {
- set_cpuid gettsc settsc setscheduler
+ set_cpuid gettsc settsc setscheduler set_max_evtchn
};
allow dom0_t dom0_t:resource { add remove };
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 9760d50..870eef1 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -863,6 +863,14 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
}
break;
+ case XEN_DOMCTL_set_max_evtchn:
+ {
+ d->max_evtchn_port = min_t(unsigned int,
+ op->u.set_max_evtchn.max_port,
+ INT_MAX);
+ }
+ break;
+
default:
ret = arch_do_domctl(op, d, u_domctl);
break;
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 0c0bbe4..34efd24 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -168,10 +168,14 @@ static int get_free_port(struct domain *d)
return -EINVAL;
for ( port = 0; port_is_valid(d, port); port++ )
+ {
+ if ( port > d->max_evtchn_port )
+ return -ENOSPC;
if ( evtchn_from_port(d, port)->state == ECS_FREE )
return port;
+ }
- if ( port == d->max_evtchns )
+ if ( port == d->max_evtchns || port > d->max_evtchn_port )
return -ENOSPC;
if ( !group_from_port(d, port) )
@@ -1230,6 +1234,7 @@ void evtchn_check_pollers(struct domain *d, unsigned int port)
int evtchn_init(struct domain *d)
{
evtchn_2l_init(d);
+ d->max_evtchn_port = INT_MAX;
d->evtchn = alloc_evtchn_bucket(d, 0);
if ( !d->evtchn )
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 4c5b2bb..d4e479f 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -852,6 +852,17 @@ struct xen_domctl_set_broken_page_p2m {
typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
+/*
+ * XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port
+ * number the guest may use. Use this to limit the amount of resources
+ * (global mapping space, xenheap) a guest may use for event channels.
+ */
+struct xen_domctl_set_max_evtchn {
+ uint32_t max_port;
+};
+typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -920,6 +931,7 @@ struct xen_domctl {
#define XEN_DOMCTL_set_broken_page_p2m 67
#define XEN_DOMCTL_setnodeaffinity 68
#define XEN_DOMCTL_getnodeaffinity 69
+#define XEN_DOMCTL_set_max_evtchn 70
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -975,6 +987,7 @@ struct xen_domctl {
struct xen_domctl_set_access_required access_required;
struct xen_domctl_audit_p2m audit_p2m;
struct xen_domctl_set_virq_handler set_virq_handler;
+ struct xen_domctl_set_max_evtchn set_max_evtchn;
struct xen_domctl_gdbsx_memio gdbsx_guest_memio;
struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index c9c3bc2..8c7d963 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -291,6 +291,7 @@ struct domain
struct evtchn *evtchn; /* first bucket only */
struct evtchn **evtchn_group[NR_EVTCHN_GROUPS]; /* all other buckets */
unsigned int max_evtchns;
+ unsigned int max_evtchn_port;
spinlock_t event_lock;
const struct evtchn_port_ops *evtchn_port_ops;
struct evtchn_fifo_domain *evtchn_fifo;
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index fa0589a..b1e2593 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -727,6 +727,9 @@ static int flask_domctl(struct domain *d, int cmd)
case XEN_DOMCTL_audit_p2m:
return current_has_perm(d, SECCLASS_HVM, HVM__AUDIT_P2M);
+ case XEN_DOMCTL_set_max_evtchn:
+ return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_MAX_EVTCHN);
+
default:
printk("flask_domctl: Unknown op %d\n", cmd);
return -EPERM;
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 5dfe13b..1fbe241 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -194,6 +194,8 @@ class domain2
setscheduler
# XENMEM_claim_pages
setclaim
+# XEN_DOMCTL_set_max_evtchn
+ set_max_evtchn
}
# Similar to class domain, but primarily contains domctls related to HVM domains
--
1.7.2.5