From: dongxiao.xu@intel.com
To: xen-devel@lists.xen.org
Subject: [PATCH 6/8] x86: get per domain CQM information
Date: Wed, 20 Nov 2013 11:27:36 +0800
Message-ID: <1384918058-128466-7-git-send-email-dongxiao.xu@intel.com>
In-Reply-To: <1384918058-128466-1-git-send-email-dongxiao.xu@intel.com>

From: Dongxiao Xu <dongxiao.xu@intel.com>

Retrieve CQM information for a given domain, which reflects the domain's
L3 cache occupancy on each socket.

Signed-off-by: Jiongxi Li <jiongxi.li@intel.com>
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
---
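For reference, a minimal sketch of the MSR sequence that read_qm_data() /
get_cqm_info() below implement, assuming it runs on a CPU of the socket
being sampled (the patch reaches such a CPU via on_selected_cpus()).
sample_l3_occupancy() is a hypothetical helper; the MSR accessors and the
constants are the ones used or added by this patch:

static int sample_l3_occupancy(uint32_t rmid, unsigned int upscaling_factor,
                               uint64_t *occupancy)
{
    uint64_t ctr;

    /* Select the event: EAX = event ID, EDX = RMID, as in read_qm_data(). */
    wrmsr(MSR_IA32_QOSEVTSEL, QOS_MONITOR_EVTID_L3, rmid);
    rdmsrl(MSR_IA32_QMC, ctr);

    /* Bits 63:62 of the counter flag an error / data-not-available read. */
    if ( ctr & IA32_QM_CTR_ERROR_MASK )
        return -1;

    /* Scale the raw counter to bytes with the CPUID-reported factor. */
    *occupancy = ctr * upscaling_factor;
    return 0;
}
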
 xen/arch/x86/pqos.c             |   60 ++++++++++++++++++++++++++++++++++++
 xen/arch/x86/sysctl.c           |   64 +++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/msr-index.h |    4 +++
 xen/include/asm-x86/pqos.h      |   14 +++++++++
 xen/include/public/domctl.h     |   14 +++++++++
 xen/include/public/sysctl.h     |   23 ++++++++++++++
 6 files changed, 179 insertions(+)

diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
index 895d892..3699efe 100644
--- a/xen/arch/x86/pqos.c
+++ b/xen/arch/x86/pqos.c
@@ -19,13 +19,30 @@
  * Place - Suite 330, Boston, MA 02111-1307 USA.
  */
 #include <asm/processor.h>
+#include <asm/msr.h>
+#include <xen/cpumask.h>
 #include <xen/init.h>
 #include <xen/spinlock.h>
+#include <public/domctl.h>
 #include <asm/pqos.h>
 
 static bool_t pqos_enabled = 1;
 boolean_param("pqos", pqos_enabled);
 
+static void read_qm_data(void *arg)
+{
+    struct qm_element *qm_element = arg;
+
+    wrmsr(MSR_IA32_QOSEVTSEL, qm_element->evtid, qm_element->rmid);
+    rdmsrl(MSR_IA32_QMC, qm_element->qm_data);
+}
+
+static void get_generic_qm_info(struct qm_element *qm_element)
+{
+    int cpu = qm_element->cpu;
+    on_selected_cpus(cpumask_of(cpu), read_qm_data, qm_element, 1);
+}
+
 unsigned int cqm_res_count = 0;
 unsigned int cqm_upscaling_factor = 0;
 bool_t cqm_enabled = 0;
@@ -85,6 +102,25 @@ bool_t system_support_cqm(void)
     return cqm_enabled;
 }
 
+unsigned int get_cqm_count(void)
+{
+    return cqm_res_count;
+}
+
+unsigned int get_cqm_avail(void)
+{
+    unsigned int cqm_avail = 0;
+    unsigned int i;
+
+    for ( i = 0; i < cqm_res_count; i++ )
+    {
+        if ( !cqm_res_array[i].inuse )
+            cqm_avail++;
+    }
+
+    return cqm_avail;
+}
+
 int alloc_cqm_resource(domid_t domain_id)
 {
     int i, rmid = -1;
@@ -136,6 +172,30 @@ void free_cqm_resource(domid_t domain_id)
     spin_unlock_irqrestore(&cqm_lock, flags);
 }
 
+void get_cqm_info(uint32_t rmid, const cpumask_t *cpu_cqmdata_map,
+                  struct xen_domctl_getdomcqminfo *info)
+{
+    struct qm_element element;
+    int cpu, i;
+
+    for_each_cpu ( cpu, cpu_cqmdata_map )
+    {
+        element.cpu = cpu;
+        element.rmid = rmid;
+        element.evtid = QOS_MONITOR_EVTID_L3;
+
+        get_generic_qm_info(&element);
+
+        i = cpu_to_socket(cpu);
+        info->socket_cqmdata[i].valid =
+            (element.qm_data & IA32_QM_CTR_ERROR_MASK) ? 0 : 1;
+        if ( info->socket_cqmdata[i].valid )
+            info->socket_cqmdata[i].l3c_occupancy = element.qm_data * cqm_upscaling_factor;
+        else
+            info->socket_cqmdata[i].l3c_occupancy = 0;
+    }
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
index 15d4b91..a779fdc 100644
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -28,6 +28,7 @@
 #include <xen/nodemask.h>
 #include <xen/cpu.h>
 #include <xsm/xsm.h>
+#include <asm/pqos.h>
 
 #define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
 
@@ -101,6 +102,69 @@ long arch_do_sysctl(
     }
     break;
 
+    case XEN_SYSCTL_getdomcqminfolist:
+    {
+        struct domain *d;
+        struct xen_domctl_getdomcqminfo info;
+        uint32_t resource_count;
+        uint32_t resource_avail;
+        uint32_t num_domains = 0;
+        cpumask_t cpu_cqmdata_map;
+        DECLARE_BITMAP(sockets, QOS_MAX_SOCKETS);
+        int cpu;
+
+        if ( !system_support_cqm() )
+        {
+            ret = -ENODEV;
+            break;
+        }
+
+        resource_count = get_cqm_count();
+        resource_avail = get_cqm_avail();
+
+        cpumask_clear(&cpu_cqmdata_map);
+        bitmap_zero(sockets, QOS_MAX_SOCKETS);
+        for_each_online_cpu(cpu)
+        {
+            int i = cpu_to_socket(cpu);
+            if ( test_and_set_bit(i, sockets) )
+                continue;
+            cpumask_set_cpu(cpu, &cpu_cqmdata_map);
+        }
+
+        rcu_read_lock(&domlist_read_lock);
+        for_each_domain ( d )
+        {
+            if ( d->domain_id < sysctl->u.getdomcqminfolist.first_domain )
+                continue;
+            if ( num_domains == sysctl->u.getdomcqminfolist.max_domains )
+                break;
+            if ( d->arch.pqos_cqm_rmid <= 0 )
+                continue;
+            memset(&info, 0, sizeof(struct xen_domctl_getdomcqminfo));
+            info.domain = d->domain_id;
+            get_cqm_info(d->arch.pqos_cqm_rmid, &cpu_cqmdata_map, &info);
+
+            if ( copy_to_guest_offset(sysctl->u.getdomcqminfolist.buffer,
+                                      num_domains, &info, 1) )
+            {
+                ret = -EFAULT;
+                break;
+            }
+
+            num_domains++;
+        }
+        rcu_read_unlock(&domlist_read_lock);
+
+        sysctl->u.getdomcqminfolist.num_domains = num_domains;
+        sysctl->u.getdomcqminfolist.resource_count = resource_count;
+        sysctl->u.getdomcqminfolist.resource_avail = resource_avail;
+
+        if ( copy_to_guest(u_sysctl, sysctl, 1) )
+            ret = -EFAULT;
+    }
+    break;
+
     default:
         ret = -ENOSYS;
         break;
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index e597a28..46ef165 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -488,4 +488,8 @@
 /* Geode defined MSRs */
 #define MSR_GEODE_BUSCONT_CONF0		0x00001900
 
+/* Platform QoS register */
+#define MSR_IA32_QOSEVTSEL             0x00000c8d
+#define MSR_IA32_QMC                   0x00000c8e
+
 #endif /* __ASM_MSR_INDEX_H */
diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
index 7e32fa5..6d1b1e8 100644
--- a/xen/include/asm-x86/pqos.h
+++ b/xen/include/asm-x86/pqos.h
@@ -27,15 +27,29 @@
 /* QoS Monitoring Event ID */
 #define QOS_MONITOR_EVTID_L3           0x1
 
+/* IA32_QM_CTR */
+#define IA32_QM_CTR_ERROR_MASK         (0x3ul << 62)
+
 struct cqm_res_struct {
     bool_t    inuse;
     uint16_t  domain_id;
 };
 
+struct qm_element {
+    uint16_t  cpu;
+    uint32_t  rmid;
+    uint8_t   evtid;
+    uint64_t  qm_data;
+};
+
 void init_platform_qos(void);
 
 bool_t system_support_cqm(void);
 int alloc_cqm_resource(domid_t);
 void free_cqm_resource(domid_t);
+unsigned int get_cqm_count(void);
+unsigned int get_cqm_avail(void);
+void get_cqm_info(uint32_t rmid, const cpumask_t *cpu_cqmdata_map,
+                  struct xen_domctl_getdomcqminfo *info);
 
 #endif
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 4fe21db..bdefc83 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -883,6 +883,20 @@ struct xen_domctl_qos_resource {
 typedef struct xen_domctl_qos_resource xen_domctl_qos_resource_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_qos_resource_t);
 
+struct xen_socket_cqmdata {
+    uint8_t  valid;
+    uint64_t l3c_occupancy;
+};
+
+struct xen_domctl_getdomcqminfo {
+    /* OUT variables. */
+    domid_t  domain;
+#define QOS_MAX_SOCKETS    128
+    struct xen_socket_cqmdata socket_cqmdata[QOS_MAX_SOCKETS];
+};
+typedef struct xen_domctl_getdomcqminfo xen_domctl_getdomcqminfo_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomcqminfo_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index 8437d31..91f206e 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -149,6 +149,14 @@ struct xen_sysctl_perfc_op {
 typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
 
+struct xen_sysctl_getcqminfo
+{
+    uint32_t resource_count;
+    uint32_t resource_avail;
+    struct xen_domctl_getdomcqminfo *dom_cqminfo;
+};
+typedef struct xen_sysctl_getcqminfo xen_sysctl_getcqminfo_t;
+
 /* XEN_SYSCTL_getdomaininfolist */
 struct xen_sysctl_getdomaininfolist {
     /* IN variables. */
@@ -632,6 +640,19 @@ struct xen_sysctl_coverage_op {
 typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t);
 
+/* XEN_SYSCTL_getdomcqminfolist */
+struct xen_sysctl_getdomcqminfolist {
+    /* IN variables. */
+    domid_t               first_domain;
+    uint32_t              max_domains;
+    XEN_GUEST_HANDLE_64(xen_domctl_getdomcqminfo_t) buffer;
+    /* OUT variables. */
+    uint32_t              num_domains;
+    uint32_t              resource_count;
+    uint32_t              resource_avail;
+};
+typedef struct xen_sysctl_getdomcqminfolist xen_sysctl_getdomcqminfolist_t;
+DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomcqminfolist_t);
 
 struct xen_sysctl {
     uint32_t cmd;
@@ -654,6 +675,7 @@ struct xen_sysctl {
 #define XEN_SYSCTL_cpupool_op                    18
 #define XEN_SYSCTL_scheduler_op                  19
 #define XEN_SYSCTL_coverage_op                   20
+#define XEN_SYSCTL_getdomcqminfolist             21
     uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
     union {
         struct xen_sysctl_readconsole       readconsole;
@@ -675,6 +697,7 @@ struct xen_sysctl {
         struct xen_sysctl_cpupool_op        cpupool_op;
         struct xen_sysctl_scheduler_op      scheduler_op;
         struct xen_sysctl_coverage_op       coverage_op;
+        struct xen_sysctl_getdomcqminfolist getdomcqminfolist;
         uint8_t                             pad[128];
     } u;
 };
-- 
1.7.9.5
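
For illustration, a minimal consumer-side sketch of the data returned by
XEN_SYSCTL_getdomcqminfolist, assuming the public structures added by this
patch (xen_domctl_getdomcqminfo, xen_socket_cqmdata, QOS_MAX_SOCKETS) are in
scope; print_cqm_info() is a hypothetical helper, and actually issuing the
sysctl (filling in first_domain, max_domains and buffer) is left to the
tools side in patch 7/8.  The per-domain array is indexed by socket ID, so
sockets that were not sampled or whose MSR read failed simply report
valid == 0:

#include <inttypes.h>
#include <stdio.h>

static void print_cqm_info(const struct xen_domctl_getdomcqminfo *info,
                           unsigned int num_domains)
{
    unsigned int i, socket;

    for ( i = 0; i < num_domains; i++ )
        for ( socket = 0; socket < QOS_MAX_SOCKETS; socket++ )
        {
            const struct xen_socket_cqmdata *sd =
                &info[i].socket_cqmdata[socket];

            if ( !sd->valid )
                continue;

            printf("dom%u, socket %u: %" PRIu64 " bytes of L3 occupied\n",
                   (unsigned int)info[i].domain, socket,
                   sd->l3c_occupancy);
        }
}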

Thread overview: 17+ messages
2013-11-20  3:27 [PATCH 0/8] enable Cache QoS Monitoring (CQM) feature dongxiao.xu
2013-11-20  3:27 ` [PATCH 1/8] x86: detect and initialize Cache QoS Monitoring feature dongxiao.xu
2013-11-20 11:11   ` Andrew Cooper
2013-11-20 12:40     ` Xu, Dongxiao
2013-11-20  3:27 ` [PATCH 2/8] x86: handle CQM resource when creating/destroying guests dongxiao.xu
2013-11-20 11:32   ` Andrew Cooper
2013-11-20 12:49     ` Xu, Dongxiao
2013-11-20  3:27 ` [PATCH 3/8] tools: " dongxiao.xu
2013-11-20  3:27 ` [PATCH 4/8] x86: dynamically attach/detach CQM service for a guest dongxiao.xu
2013-11-20 11:44   ` Andrew Cooper
2013-11-20 13:19     ` Xu, Dongxiao
2013-11-20  3:27 ` [PATCH 5/8] tools: " dongxiao.xu
2013-11-20  3:27 ` dongxiao.xu [this message]
2013-11-20 11:56   ` [PATCH 6/8] x86: get per domain CQM information Andrew Cooper
2013-11-20 13:22     ` Xu, Dongxiao
2013-11-20  3:27 ` [PATCH 7/8] tools: " dongxiao.xu
2013-11-20  3:27 ` [PATCH 8/8] x86: enable CQM monitoring for each domain RMID dongxiao.xu
