From: dongxiao.xu@intel.com
To: xen-devel@lists.xen.org
Cc: keir@xen.org, Ian.Campbell@citrix.com,
stefano.stabellini@eu.citrix.com, andrew.cooper3@citrix.com,
Ian.Jackson@eu.citrix.com, JBeulich@suse.com,
dgdegra@tycho.nsa.gov
Subject: [PATCH v3 3/7] x86: dynamically attach/detach CQM service for a guest
Date: Fri, 29 Nov 2013 13:48:07 +0800
Message-ID: <1385704092-89546-4-git-send-email-dongxiao.xu@intel.com>
In-Reply-To: <1385704092-89546-1-git-send-email-dongxiao.xu@intel.com>
From: Dongxiao Xu <dongxiao.xu@intel.com>
Add hypervisor-side support for dynamically attaching and detaching the
CQM service for a given guest.
Signed-off-by: Jiongxi Li <jiongxi.li@intel.com>
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
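For context, a minimal sketch of how a toolstack might drive the two new
domctls through libxc-style wrappers. The xc_domain_pqos_attach() and
xc_domain_pqos_detach() names here are illustrative assumptions only; the
actual libxc/libxl bindings are introduced in patch 7/7 of this series:

    /* Hypothetical wrappers; the real bindings arrive in patch 7/7. */
    static int xc_domain_pqos_op(xc_interface *xch, uint32_t domid,
                                 uint32_t cmd)
    {
        DECLARE_DOMCTL;

        domctl.cmd = cmd;
        domctl.domain = (domid_t)domid;
        /* Select the CQM service via the flags bit from the public header. */
        domctl.u.qos_type.flags = XEN_DOMCTL_pqos_cqm;

        return do_domctl(xch, &domctl);
    }

    int xc_domain_pqos_attach(xc_interface *xch, uint32_t domid)
    {
        return xc_domain_pqos_op(xch, domid, XEN_DOMCTL_attach_pqos);
    }

    int xc_domain_pqos_detach(xc_interface *xch, uint32_t domid)
    {
        return xc_domain_pqos_op(xch, domid, XEN_DOMCTL_detach_pqos);
    }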
xen/arch/x86/domctl.c | 40 ++++++++++++++++++++++++++++++++++++++++
xen/include/public/domctl.h | 14 ++++++++++++++
2 files changed, 54 insertions(+)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index f7e4586..7007990 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -35,6 +35,7 @@
#include <asm/mem_sharing.h>
#include <asm/xstate.h>
#include <asm/debugger.h>
+#include <asm/pqos.h>
static int gdbsx_guest_mem_io(
domid_t domid, struct xen_domctl_gdbsx_memio *iop)
@@ -1223,6 +1224,45 @@ long arch_do_domctl(
}
break;
+ case XEN_DOMCTL_attach_pqos:
+ {
+ if ( domctl->u.qos_type.flags & XEN_DOMCTL_pqos_cqm )
+ {
+ if ( !system_supports_cqm() )
+ ret = -ENODEV;
+ else if ( d->arch.pqos_cqm_rmid > 0 )
+ ret = -EEXIST;
+ else
+ {
+ ret = alloc_cqm_rmid(d);
+ if ( ret < 0 )
+ ret = -EUSERS;
+ }
+ }
+ else
+ ret = -EINVAL;
+ }
+ break;
+
+ case XEN_DOMCTL_detach_pqos:
+ {
+ if ( domctl->u.qos_type.flags & XEN_DOMCTL_pqos_cqm )
+ {
+ if ( !system_supports_cqm() )
+ ret = -ENODEV;
+ else if ( d->arch.pqos_cqm_rmid > 0 )
+ {
+ free_cqm_rmid(d);
+ ret = 0;
+ }
+ else
+ ret = -ENOENT;
+ }
+ else
+ ret = -EINVAL;
+ }
+ break;
+
default:
ret = iommu_do_domctl(domctl, d, u_domctl);
break;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 47a850a..800b2f4 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -872,6 +872,17 @@ struct xen_domctl_set_max_evtchn {
typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
+/* XEN_DOMCTL_attach_pqos */
+/* XEN_DOMCTL_detach_pqos */
+struct xen_domctl_qos_type {
+ /* Attach or detach flag for cqm */
+#define _XEN_DOMCTL_pqos_cqm 0
+#define XEN_DOMCTL_pqos_cqm (1U<<_XEN_DOMCTL_pqos_cqm)
+ uint32_t flags;
+};
+typedef struct xen_domctl_qos_type xen_domctl_qos_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_qos_type_t);
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -941,6 +952,8 @@ struct xen_domctl {
#define XEN_DOMCTL_setnodeaffinity 68
#define XEN_DOMCTL_getnodeaffinity 69
#define XEN_DOMCTL_set_max_evtchn 70
+#define XEN_DOMCTL_attach_pqos 71
+#define XEN_DOMCTL_detach_pqos 72
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -1001,6 +1014,7 @@ struct xen_domctl {
struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
struct xen_domctl_gdbsx_domstatus gdbsx_domstatus;
+ struct xen_domctl_qos_type qos_type;
uint8_t pad[128];
} u;
};
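A brief note on the interface above: qos_type.flags is defined as a bit
field rather than an enumeration, presumably so that further platform QoS
services can later share the same attach/detach domctl pair alongside
XEN_DOMCTL_pqos_cqm. The possible return values, all visible in the hunks
above, are: -ENODEV when the platform lacks CQM support, -EEXIST when
attaching a domain that already holds an RMID, -EUSERS when alloc_cqm_rmid()
finds no free RMID, -ENOENT when detaching a domain with no RMID attached,
and -EINVAL when no supported service bit is set in flags.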
--
1.7.9.5