From: Kajol Jain <kjain@linux.ibm.com>
To: mpe@ellerman.id.au, linuxppc-dev@lists.ozlabs.org,
linux-nvdimm@lists.01.org, linux-kernel@vger.kernel.org,
peterz@infradead.org
Cc: santosh@fossix.org, maddy@linux.vnet.ibm.com,
rnsastry@linux.ibm.com, aneesh.kumar@linux.ibm.com,
atrajeev@linux.vnet.ibm.com, kjain@linux.ibm.com,
vaibhav@linux.ibm.com, dan.j.williams@intel.com,
ira.weiny@intel.com, tglx@linutronix.de
Subject: [RFC v2 4/4] powerpc/papr_scm: Add cpu hotplug support for nvdimm pmu device
Date: Tue, 25 May 2021 18:52:16 +0530
Message-ID: <20210525132216.1239259-5-kjain@linux.ibm.com>
In-Reply-To: <20210525132216.1239259-1-kjain@linux.ibm.com>
This patch adds cpu hotplug support to the nvdimm pmu. It adds a
cpumask attribute that designates the cpu which makes the HCALL to
collect counter data for the nvdimm device, and updates the ABI
documentation accordingly. A hypothetical userspace usage sketch is
included after the diffstat below.
Result on a power9 lpar system:
command:# cat /sys/devices/nmem0/cpumask
0
Signed-off-by: Kajol Jain <kjain@linux.ibm.com>
---
Documentation/ABI/testing/sysfs-bus-papr-pmem | 6 ++
arch/powerpc/platforms/pseries/papr_scm.c | 61 +++++++++++++++++++
include/linux/nd.h | 17 ++++--
3 files changed, 79 insertions(+), 5 deletions(-)
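For illustration only (not part of this patch): a minimal userspace
sketch that pins a counting event to the designated cpu read from the
new cpumask file. It assumes the nmem0 device, the "noopstat"
(event=0x1) encoding from the ABI document in this series, and the
standard perf sysfs "type" attribute; error handling is kept minimal.

  /* nvdimm_cpumask_demo.c - hypothetical example */
  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/types.h>
  #include <sys/syscall.h>
  #include <linux/perf_event.h>

  static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
  			      int cpu, int group_fd, unsigned long flags)
  {
  	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  int main(void)
  {
  	struct perf_event_attr attr;
  	unsigned int type;
  	uint64_t count;
  	int cpu, fd;
  	FILE *f;

  	/* pmu type as registered by perf_pmu_register() */
  	f = fopen("/sys/devices/nmem0/type", "r");
  	if (!f || fscanf(f, "%u", &type) != 1)
  		return 1;
  	fclose(f);

  	/* designated cpu exposed by this patch */
  	f = fopen("/sys/devices/nmem0/cpumask", "r");
  	if (!f || fscanf(f, "%d", &cpu) != 1)
  		return 1;
  	fclose(f);

  	memset(&attr, 0, sizeof(attr));
  	attr.type = type;
  	attr.size = sizeof(attr);
  	attr.config = 0x1;		/* noopstat, per the event format */

  	/* system-wide counting event pinned to the designated cpu */
  	fd = perf_event_open(&attr, -1, cpu, -1, 0);
  	if (fd < 0)
  		return 1;

  	sleep(1);
  	if (read(fd, &count, sizeof(count)) == sizeof(count))
  		printf("noopstat: %llu\n", (unsigned long long)count);
  	close(fd);
  	return 0;
  }

If the designated cpu is later offlined, nvdimm_pmu_offline_cpu() below
moves nd_pmu->cpu to the last cpu in cpu_active_mask, and the cpumask
file reflects the new value.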
diff --git a/Documentation/ABI/testing/sysfs-bus-papr-pmem b/Documentation/ABI/testing/sysfs-bus-papr-pmem
index 38c4daf65af2..986df1691914 100644
--- a/Documentation/ABI/testing/sysfs-bus-papr-pmem
+++ b/Documentation/ABI/testing/sysfs-bus-papr-pmem
@@ -76,6 +76,12 @@ Description: (RO) Attribute group to describe the magic bits
For example::
noopstat = "event=0x1"
+What: /sys/devices/nmemX/cpumask
+Date: May 2021
+Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
+Description:	(RO) This sysfs file exposes the cpumask of the cpu designated
+		to make HCALLs to retrieve nvdimm pmu event counter data.
+
What: /sys/devices/nmemX/events
Date: May 2021
Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index f2d57da98ff4..76121d876b7f 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -339,6 +339,28 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
return 0;
}
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct nvdimm_pmu *nd_pmu;
+
+ nd_pmu = container_of(pmu, struct nvdimm_pmu, pmu);
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(nd_pmu->cpu));
+}
+
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *nvdimm_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group nvdimm_pmu_cpumask_group = {
+ .attrs = nvdimm_cpumask_attrs,
+};
+
PMU_FORMAT_ATTR(event, "config:0-4");
static struct attribute *nvdimm_pmu_format_attr[] = {
@@ -459,6 +481,24 @@ static void papr_scm_pmu_del(struct perf_event *event, int flags)
papr_scm_pmu_read(event);
}
+static int nvdimm_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct nvdimm_pmu *nd_pmu;
+ int target;
+
+ nd_pmu = hlist_entry_safe(node, struct nvdimm_pmu, node);
+
+ if (cpu != nd_pmu->cpu)
+ return 0;
+
+ target = cpumask_last(cpu_active_mask);
+ if (target < 0 || target >= nr_cpu_ids)
+ return -1;
+
+ nd_pmu->cpu = target;
+ return 0;
+}
+
static ssize_t device_show_string(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -603,6 +643,7 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
/* Fill attribute groups for the nvdimm pmu device */
nd_pmu->attr_groups[NVDIMM_PMU_FORMAT_ATTR] = &nvdimm_pmu_format_group;
nd_pmu->attr_groups[NVDIMM_PMU_EVENT_ATTR] = nvdimm_pmu_events_group;
+ nd_pmu->attr_groups[NVDIMM_PMU_CPUMASK_ATTR] = &nvdimm_pmu_cpumask_group;
nd_pmu->attr_groups[NVDIMM_PMU_NULL_ATTR] = NULL;
kfree(single_stats);
@@ -652,6 +693,20 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
nd_pmu->read = papr_scm_pmu_read;
nd_pmu->add = papr_scm_pmu_add;
nd_pmu->del = papr_scm_pmu_del;
+ nd_pmu->cpu = raw_smp_processor_id();
+
+ rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/nvdimm:online",
+ NULL, nvdimm_pmu_offline_cpu);
+ if (rc < 0)
+ goto pmu_cpuhp_setup_err;
+
+ nd_pmu->cpuhp_state = rc;
+
+ /* Register the pmu instance for cpu hotplug */
+ rc = cpuhp_state_add_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
+ if (rc)
+ goto cpuhp_instance_err;
rc = register_nvdimm_pmu(nd_pmu, p->pdev);
if (rc)
@@ -665,6 +720,10 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
return;
pmu_register_err:
+ cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
+cpuhp_instance_err:
+ cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
+pmu_cpuhp_setup_err:
nvdimm_pmu_mem_free(nd_pmu);
kfree(p->nvdimm_events_map);
pmu_check_events_err:
@@ -675,6 +734,8 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
static void nvdimm_pmu_uinit(struct nvdimm_pmu *nd_pmu)
{
+ cpuhp_state_remove_instance_nocalls(nd_pmu->cpuhp_state, &nd_pmu->node);
+ cpuhp_remove_multi_state(nd_pmu->cpuhp_state);
unregister_nvdimm_pmu(&nd_pmu->pmu);
nvdimm_pmu_mem_free(nd_pmu);
kfree(nd_pmu);
diff --git a/include/linux/nd.h b/include/linux/nd.h
index a0e0619256be..177795413ab3 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -28,7 +28,8 @@ enum nvdimm_claim_class {
/* Event attribute array index */
#define NVDIMM_PMU_FORMAT_ATTR 0
#define NVDIMM_PMU_EVENT_ATTR 1
-#define NVDIMM_PMU_NULL_ATTR 2
+#define NVDIMM_PMU_CPUMASK_ATTR 2
+#define NVDIMM_PMU_NULL_ATTR 3
/**
* struct nvdimm_pmu - data structure for nvdimm perf driver
@@ -37,7 +38,10 @@ enum nvdimm_claim_class {
* @pmu: pmu data structure for nvdimm performance stats.
* @dev: nvdimm device pointer.
* @functions(event_init/add/del/read): platform specific pmu functions.
- * @attr_groups: data structure for events and formats.
+ * @attr_groups: data structure for events, formats and cpumask
+ * @cpu: designated cpu for counter access.
+ * @node: node for cpu hotplug notifier link.
+ * @cpuhp_state: state for cpu hotplug notification.
*/
struct nvdimm_pmu {
const char *name;
@@ -49,10 +53,13 @@ struct nvdimm_pmu {
void (*read)(struct perf_event *event);
/*
* Attribute groups for the nvdimm pmu. Index 0 used for
- * format attribute, index 1 used for event attribute and
- * index 2 kept as NULL.
+ * format attribute, index 1 used for event attribute,
+ * index 2 used for cpumask attribute and index 3 kept as NULL.
*/
- const struct attribute_group *attr_groups[3];
+ const struct attribute_group *attr_groups[4];
+ int cpu;
+ struct hlist_node node;
+ enum cpuhp_state cpuhp_state;
};
int register_nvdimm_pmu(struct nvdimm_pmu *nvdimm, struct platform_device *pdev);
--
2.27.0