From: Andrew Cooper <andrew.cooper3@citrix.com>
To: dongxiao.xu@intel.com
Cc: keir@xen.org, Ian.Campbell@citrix.com,
stefano.stabellini@eu.citrix.com, Ian.Jackson@eu.citrix.com,
xen-devel@lists.xen.org, JBeulich@suse.com,
dgdegra@tycho.nsa.gov
Subject: Re: [PATCH v3 4/7] x86: collect CQM information from all sockets
Date: Fri, 29 Nov 2013 14:53:32 +0000
Message-ID: <5298AA6C.7070802@citrix.com>
In-Reply-To: <1385704092-89546-5-git-send-email-dongxiao.xu@intel.com>
On 29/11/13 05:48, dongxiao.xu@intel.com wrote:
> From: Dongxiao Xu <dongxiao.xu@intel.com>
>
> Collect CQM information (L3 cache occupancy) from all sockets.
> An upper-layer application can parse the returned data structure to
> obtain each guest's L3 cache occupancy on particular sockets.
>
> Signed-off-by: Jiongxi Li <jiongxi.li@intel.com>
> Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
> ---
>  xen/arch/x86/pqos.c             | 59 ++++++++++++++++++++++++++
>  xen/arch/x86/sysctl.c           | 89 +++++++++++++++++++++++++++++++++++++++
>  xen/include/asm-x86/msr-index.h |  4 ++
>  xen/include/asm-x86/pqos.h      |  8 ++++
>  xen/include/public/domctl.h     |  9 ++++
>  xen/include/public/sysctl.h     | 11 +++++
>  6 files changed, 180 insertions(+)
>
> diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
> index 1148f3b..615c5ea 100644
> --- a/xen/arch/x86/pqos.c
> +++ b/xen/arch/x86/pqos.c
> @@ -19,6 +19,7 @@
>   * Place - Suite 330, Boston, MA 02111-1307 USA.
>   */
>  #include <asm/processor.h>
> +#include <asm/msr.h>
>  #include <xen/init.h>
>  #include <xen/spinlock.h>
>  #include <asm/pqos.h>
> @@ -91,6 +92,26 @@ bool_t system_supports_cqm(void)
>      return cqm_enabled;
>  }
>
> +unsigned int get_cqm_count(void)
> +{
> +    return cqm_rmid_count;
> +}
> +
> +unsigned int get_cqm_avail(void)
> +{
> +    unsigned int rmid, cqm_avail = 0;
> +    unsigned long flags;
> +
> +    spin_lock_irqsave(&cqm_lock, flags);
> +    /* RMID=0 is reserved, enumerate from 1 */
> +    for ( rmid = 1; rmid < cqm_rmid_count; rmid++ )
> +        if ( cqm_rmid_array[rmid] == DOMID_INVALID )
> +            cqm_avail++;
> +    spin_unlock_irqrestore(&cqm_lock, flags);
> +
> +    return cqm_avail;
This cqm_avail is stale as soon as you release the lock, so at best it is a hint for the toolstack.
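Given that, a running counter maintained in alloc_cqm_rmid()/free_cqm_rmid() would avoid rescanning the array on every query. A rough, untested sketch of what I mean (the counter name is made up; it would live beside cqm_rmid_array and be updated under cqm_lock):

/* Sketch: track free RMIDs at alloc/free time instead of per query. */
static unsigned int cqm_rmid_free_count; /* updated under cqm_lock */

/* alloc_cqm_rmid() decrements this when it claims an RMID;
 * free_cqm_rmid() increments it again. */

unsigned int get_cqm_avail(void)
{
    /* Still stale by the time the caller acts on it, but O(1). */
    return cqm_rmid_free_count;
}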
> +}
> +
>  int alloc_cqm_rmid(struct domain *d)
>  {
>      int rc = 0;
> @@ -139,6 +160,44 @@ void free_cqm_rmid(struct domain *d)
>      d->arch.pqos_cqm_rmid = 0;
>  }
>
> +static void read_cqm_data(void *arg)
> +{
> +    uint64_t cqm_data;
> +    unsigned int rmid;
> +    int socket = cpu_to_socket(smp_processor_id());
> +    struct xen_socket_cqmdata *data = arg;
> +    unsigned long flags, i;
> +
> +    if ( socket < 0 )
> +        return;
> +
> +    spin_lock_irqsave(&cqm_lock, flags);
> +    /* RMID=0 is reserved, enumerate from 1 */
> +    for ( rmid = 1; rmid < cqm_rmid_count; rmid++ )
> +    {
> +        if ( cqm_rmid_array[rmid] == DOMID_INVALID )
> +            continue;
> +
> +        wrmsr(MSR_IA32_QOSEVTSEL, QOS_MONITOR_EVTID_L3, rmid);
> +        rdmsrl(MSR_IA32_QMC, cqm_data);
> +
> +        i = socket * cqm_rmid_count + rmid;
> +        data[i].valid = !(cqm_data & IA32_QM_CTR_ERROR_MASK);
> +        if ( data[i].valid )
> +        {
> +            data[i].l3c_occupancy = cqm_data * cqm_upscaling_factor;
> +            data[i].socket = socket;
> +            data[i].domid = cqm_rmid_array[rmid];
> +        }
> +    }
> +    spin_unlock_irqrestore(&cqm_lock, flags);
> +}
> +
> +void get_cqm_info(cpumask_t *cpu_cqmdata_map, struct xen_socket_cqmdata *data)
> +{
> +    on_selected_cpus(cpu_cqmdata_map, read_cqm_data, data, 1);
> +}
> +
>  /*
>   * Local variables:
>   * mode: C
> diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
> index 15d4b91..f916fe6 100644
> --- a/xen/arch/x86/sysctl.c
> +++ b/xen/arch/x86/sysctl.c
> @@ -28,6 +28,7 @@
>  #include <xen/nodemask.h>
>  #include <xen/cpu.h>
>  #include <xsm/xsm.h>
> +#include <asm/pqos.h>
>
>  #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
>
> @@ -66,6 +67,47 @@ void arch_do_physinfo(xen_sysctl_physinfo_t *pi)
>      pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
>  }
>
> +/* Select one random CPU for each socket */
I know this is not specifically a fault of your code, but these masks of
cpus on specific sockets are really information which should be set up at
boot and tweaked on cpu_up/down.

They should certainly not be recalculated from scratch every time this
hypercall is made. (That would also avoid the xalloc/xfree on the
hypercall path.)
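Something along these lines is roughly what I have in mind - completely
untested, and NR_SOCKETS is a made-up placeholder for however the array
would actually be sized:

/* Untested sketch: keep one cpumask per socket current via the existing
 * cpu notifier infrastructure, instead of rebuilding them per hypercall. */
static cpumask_t socket_cpu_mask[NR_SOCKETS]; /* NR_SOCKETS: placeholder */

static int cpu_callback(struct notifier_block *nfb,
                        unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    int socket = cpu_to_socket(cpu);

    if ( socket < 0 )
        return NOTIFY_DONE;

    switch ( action )
    {
    case CPU_ONLINE:
        cpumask_set_cpu(cpu, &socket_cpu_mask[socket]);
        break;
    case CPU_DEAD:
        cpumask_clear_cpu(cpu, &socket_cpu_mask[socket]);
        break;
    }

    return NOTIFY_DONE;
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};

/* ...plus a one-off register_cpu_notifier(&cpu_nfb) at boot, and seeding
 * the masks for the cpus which are already online at that point. */
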
~Andrew
> +static void select_socket_cpu(cpumask_t *cpu_bitmap)
> +{
> +    int i;
> +    unsigned int cpu;
> +    cpumask_t *socket_cpuset;
> +    int max_socket = 0;
> +    unsigned int num_cpus = num_online_cpus();
> +    DECLARE_BITMAP(sockets, num_cpus);
> +
> +    cpumask_clear(cpu_bitmap);
> +
> +    for_each_online_cpu(cpu)
> +    {
> +        i = cpu_to_socket(cpu);
> +        if ( i < 0 || test_and_set_bit(i, sockets) )
> +            continue;
> +        max_socket = max(max_socket, i);
> +    }
> +
> +    socket_cpuset = xzalloc_array(cpumask_t, max_socket + 1);
> +    if ( !socket_cpuset )
> +        return;
> +
> +    for_each_online_cpu(cpu)
> +    {
> +        i = cpu_to_socket(cpu);
> +        if ( i < 0 )
> +            continue;
> +        cpumask_set_cpu(cpu, &socket_cpuset[i]);
> +    }
> +
> +    for ( i = 0; i <= max_socket; i++ )
> +    {
> +        cpu = cpumask_any(&socket_cpuset[i]);
> +        cpumask_set_cpu(cpu, cpu_bitmap);
> +    }
> +
> +    xfree(socket_cpuset);
> +}
> +
>  long arch_do_sysctl(
>      struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
>  {
> @@ -101,6 +143,53 @@ long arch_do_sysctl(
>      }
>      break;
>
> +    case XEN_SYSCTL_getcqminfo:
> +    {
> +        struct xen_socket_cqmdata *info;
> +        uint32_t num_sockets;
> +        uint32_t num_rmid;
> +        cpumask_t cpu_cqmdata_map;
> +
> +        if ( !system_supports_cqm() )
> +        {
> +            ret = -ENODEV;
> +            break;
> +        }
> +
> +        select_socket_cpu(&cpu_cqmdata_map);
> +
> +        num_sockets = min((unsigned int)cpumask_weight(&cpu_cqmdata_map),
> +                          sysctl->u.getcqminfo.num_sockets);
> +        num_rmid = get_cqm_count();
> +        info = xzalloc_array(struct xen_socket_cqmdata,
> +                             num_rmid * num_sockets);
> +        if ( !info )
> +        {
> +            ret = -ENOMEM;
> +            break;
> +        }
> +
> +        get_cqm_info(&cpu_cqmdata_map, info);
> +
> +        if ( copy_to_guest_offset(sysctl->u.getcqminfo.buffer,
> +                                  0, info, num_rmid * num_sockets) )
> +        {
> +            ret = -EFAULT;
> +            xfree(info);
> +            break;
> +        }
> +
> +        sysctl->u.getcqminfo.num_rmid = num_rmid;
> +        sysctl->u.getcqminfo.num_rmid_avail = get_cqm_avail();
> +        sysctl->u.getcqminfo.num_sockets = num_sockets;
> +
> +        if ( copy_to_guest(u_sysctl, sysctl, 1) )
> +            ret = -EFAULT;
> +
> +        xfree(info);
> +    }
> +    break;
> +
>      default:
>          ret = -ENOSYS;
>          break;
> diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
> index e597a28..46ef165 100644
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -488,4 +488,8 @@
>  /* Geode defined MSRs */
>  #define MSR_GEODE_BUSCONT_CONF0    0x00001900
>
> +/* Platform QoS register */
> +#define MSR_IA32_QOSEVTSEL         0x00000c8d
> +#define MSR_IA32_QMC               0x00000c8e
> +
>  #endif /* __ASM_MSR_INDEX_H */
> diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
> index c54905b..2ab9277 100644
> --- a/xen/include/asm-x86/pqos.h
> +++ b/xen/include/asm-x86/pqos.h
> @@ -21,6 +21,8 @@
>  #ifndef ASM_PQOS_H
>  #define ASM_PQOS_H
>  #include <xen/sched.h>
> +#include <xen/cpumask.h>
> +#include <public/domctl.h>
>
>  /* QoS Resource Type Enumeration */
>  #define QOS_MONITOR_TYPE_L3            0x2
> @@ -28,10 +30,16 @@
>  /* QoS Monitoring Event ID */
>  #define QOS_MONITOR_EVTID_L3           0x1
>
> +/* IA32_QM_CTR */
> +#define IA32_QM_CTR_ERROR_MASK         (0x3ul << 62)
> +
>  void init_platform_qos(void);
>
>  bool_t system_supports_cqm(void);
>  int alloc_cqm_rmid(struct domain *d);
>  void free_cqm_rmid(struct domain *d);
> +unsigned int get_cqm_count(void);
> +unsigned int get_cqm_avail(void);
> +void get_cqm_info(cpumask_t *cpu_cqmdata_map, struct xen_socket_cqmdata *data);
>
>  #endif
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index 800b2f4..53c740e 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -883,6 +883,15 @@ struct xen_domctl_qos_type {
>  typedef struct xen_domctl_qos_type xen_domctl_qos_type_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_domctl_qos_type_t);
>
> +struct xen_socket_cqmdata {
> +    uint64_t l3c_occupancy;
> +    uint32_t socket;
> +    domid_t  domid;
> +    uint8_t  valid;
> +};
> +typedef struct xen_socket_cqmdata xen_socket_cqmdata_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_socket_cqmdata_t);
> +
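As an aside, it would be worth a comment in the header spelling out the
buffer layout. My reading of read_cqm_data() is that a toolstack consumer
would walk the results along these lines (untested sketch, hypothetical
helper, assuming sockets are numbered densely from 0):

#include <stdio.h>
#include <inttypes.h>

/* Sketch: walk the buffer filled by XEN_SYSCTL_getcqminfo - one
 * num_rmid-sized slab per socket, with RMID 0 reserved. */
static void dump_cqm(const struct xen_socket_cqmdata *data,
                     uint32_t num_sockets, uint32_t num_rmid)
{
    uint32_t socket, rmid;

    for ( socket = 0; socket < num_sockets; socket++ )
        for ( rmid = 1; rmid < num_rmid; rmid++ )
        {
            const struct xen_socket_cqmdata *d =
                &data[socket * num_rmid + rmid];

            if ( d->valid )
                printf("dom%u socket %"PRIu32": %"PRIu64" bytes of L3\n",
                       (unsigned int)d->domid, d->socket,
                       d->l3c_occupancy);
        }
}
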
>  struct xen_domctl {
>      uint32_t cmd;
>  #define XEN_DOMCTL_createdomain                   1
> diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
> index 8437d31..85eee16 100644
> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -632,6 +632,15 @@ struct xen_sysctl_coverage_op {
>  typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t;
>  DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t);
>
> +/* XEN_SYSCTL_getcqminfo */
> +struct xen_sysctl_getcqminfo {
> +    XEN_GUEST_HANDLE_64(xen_socket_cqmdata_t) buffer; /* OUT */
> +    uint32_t num_sockets;    /* IN/OUT */
> +    uint32_t num_rmid;       /* OUT */
> +    uint32_t num_rmid_avail; /* OUT */
> +};
> +typedef struct xen_sysctl_getcqminfo xen_sysctl_getcqminfo_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcqminfo_t);
>
>  struct xen_sysctl {
>      uint32_t cmd;
> @@ -654,6 +663,7 @@ struct xen_sysctl {
>  #define XEN_SYSCTL_cpupool_op                    18
>  #define XEN_SYSCTL_scheduler_op                  19
>  #define XEN_SYSCTL_coverage_op                   20
> +#define XEN_SYSCTL_getcqminfo                    21
>      uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
>      union {
>          struct xen_sysctl_readconsole       readconsole;
> @@ -675,6 +685,7 @@ struct xen_sysctl {
>          struct xen_sysctl_cpupool_op        cpupool_op;
>          struct xen_sysctl_scheduler_op      scheduler_op;
>          struct xen_sysctl_coverage_op       coverage_op;
> +        struct xen_sysctl_getcqminfo        getcqminfo;
>          uint8_t                             pad[128];
>      } u;
>  };