* [RFC][PATCH v2 02/11] perf: core, add pmu register and lookup functions
@ 2010-05-19 1:45 Lin Ming
2010-05-18 18:38 ` Cyrill Gorcunov
0 siblings, 1 reply; 3+ messages in thread
From: Lin Ming @ 2010-05-19 1:45 UTC (permalink / raw)
To: Peter Zijlstra, Ingo Molnar, Corey Ashford
Cc: Frederic Weisbecker, Paul Mundt, eranian@gmail.com,
Gary.Mohr@Bull.com, arjan@linux.intel.com, Zhang, Yanmin,
Paul Mackerras, David S. Miller, Russell King,
Arnaldo Carvalho de Melo, Will Deacon, Maynard Johnson, Carl Love,
greg@kroah.com, Kay Sievers, lkml
Also add a new API: pmu::init_event
Changelog
v2: Use RCU for synchronization (Peter Zijlstra)
v1: add pmu register and lookup functions
Signed-off-by: Lin Ming <ming.m.lin@intel.com>
---
include/linux/perf_event.h | 11 ++++++
kernel/perf_event.c | 73 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 84 insertions(+), 0 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6c01c5f..40809f5 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -549,10 +549,16 @@ struct perf_event;
#define PERF_EVENT_TXN_STARTED 1
+#define PMU_TYPE_CPU 0
+#define PMU_TYPE_NODE 1
+
/**
* struct pmu - generic performance monitoring unit
*/
struct pmu {
+ int id;
+ struct list_head entry;
+
int (*enable) (struct perf_event *event);
void (*disable) (struct perf_event *event);
int (*start) (struct perf_event *event);
@@ -569,6 +575,8 @@ struct pmu {
void (*start_txn) (struct pmu *pmu);
void (*cancel_txn) (struct pmu *pmu);
int (*commit_txn) (struct pmu *pmu);
+
+ int (*init_event) (struct perf_event *event);
};
/**
@@ -1013,6 +1021,9 @@ extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
+
+extern int perf_event_register_pmu(struct pmu *pmu);
+extern void perf_event_unregister_pmu(int id);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task) { }
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index ba7a37a..31b032b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -40,6 +40,12 @@
*/
static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
+/*
+ * The list of multiple hw pmus
+ */
+static struct list_head pmus;
+static DEFINE_SPINLOCK(pmus_lock);
+
int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;
@@ -4678,6 +4684,34 @@ static struct pmu *sw_perf_event_init(struct perf_event *event)
return pmu;
}
+/*
+ * Map an event's attr.type to a registered hw pmu.
+ * Returns NULL if the type is unknown or no matching pmu is registered.
+ */
+static struct pmu *perf_event_lookup_pmu(struct perf_event *event)
+{
+	struct pmu *pmu, *found = NULL;
+	int pmu_id;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+	case PERF_TYPE_RAW:
+		pmu_id = PMU_TYPE_CPU;
+		break;
+
+	/* TBD: will add other pmu type later */
+
+	default:
+		return NULL;
+	}
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(pmu, &pmus, entry) {
+		/*
+		 * On a full traversal the iterator ends up pointing at the
+		 * container of the list head, which is not a valid pmu, so
+		 * record the match explicitly instead of returning the cursor.
+		 */
+		if (pmu->id == pmu_id) {
+			found = pmu;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return found;
+}
+
/*
* Allocate and initialize a event structure
*/
@@ -5635,6 +5669,8 @@ void __init perf_event_init(void)
perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&perf_cpu_nb);
+
+ INIT_LIST_HEAD(&pmus);
}
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
@@ -5734,3 +5770,40 @@ static int __init perf_event_sysfs_init(void)
&perfclass_attr_group);
}
device_initcall(perf_event_sysfs_init);
+
+/*
+ * Register a hw pmu. Returns 1 on success, 0 if a pmu with the
+ * same id is already registered.
+ *
+ * The duplicate check and the insertion must be one atomic step,
+ * so both are done under pmus_lock; plain list_for_each_entry is
+ * fine here because the lock excludes all other writers.  (The old
+ * RCU-check-then-lock-insert scheme both leaked rcu_read_lock on
+ * the error path and raced with a concurrent identical register.)
+ */
+int perf_event_register_pmu(struct pmu *pmu)
+{
+	struct pmu *tmp;
+	int ret = 1;
+
+	spin_lock(&pmus_lock);
+	list_for_each_entry(tmp, &pmus, entry) {
+		if (tmp->id == pmu->id) {
+			ret = 0;
+			goto out;
+		}
+	}
+	list_add_tail_rcu(&pmu->entry, &pmus);
+out:
+	spin_unlock(&pmus_lock);
+	return ret;
+}
+
+/*
+ * Unregister the pmu with the given id, if present.
+ *
+ * list_del_rcu() is a list *write* and must be serialized against
+ * other writers with pmus_lock; rcu_read_lock() alone is not enough.
+ * Deleting inside the locked walk also avoids the old bug where the
+ * loop cursor was tested for NULL after a full traversal: the cursor
+ * then points at the container of the list head, never NULL, so the
+ * old code could delete a bogus entry.
+ *
+ * NOTE(review): callers that free the pmu afterwards must wait for a
+ * grace period (synchronize_rcu) before doing so — confirm at call sites.
+ */
+void perf_event_unregister_pmu(int id)
+{
+	struct pmu *tmp;
+
+	spin_lock(&pmus_lock);
+	list_for_each_entry(tmp, &pmus, entry) {
+		if (tmp->id == id) {
+			list_del_rcu(&tmp->entry);
+			break;
+		}
+	}
+	spin_unlock(&pmus_lock);
+}
^ permalink raw reply related [flat|nested] 3+ messages in thread* Re: [RFC][PATCH v2 02/11] perf: core, add pmu register and lookup functions
2010-05-19 1:45 [RFC][PATCH v2 02/11] perf: core, add pmu register and lookup functions Lin Ming
@ 2010-05-18 18:38 ` Cyrill Gorcunov
2010-05-19 2:00 ` Lin Ming
0 siblings, 1 reply; 3+ messages in thread
From: Cyrill Gorcunov @ 2010-05-18 18:38 UTC (permalink / raw)
To: Lin Ming
Cc: Peter Zijlstra, Ingo Molnar, Corey Ashford, Frederic Weisbecker,
Paul Mundt, eranian@gmail.com, Gary.Mohr@Bull.com,
arjan@linux.intel.com, Zhang, Yanmin, Paul Mackerras,
David S. Miller, Russell King, Arnaldo Carvalho de Melo,
Will Deacon, Maynard Johnson, Carl Love, greg@kroah.com,
Kay Sievers, lkml
On Wed, May 19, 2010 at 01:45:02AM +0000, Lin Ming wrote:
...
> +int perf_event_register_pmu(struct pmu *pmu)
> +{
> + struct pmu *tmp;
> + int ret = 1;
> +
> + rcu_read_lock();
> + list_for_each_entry_rcu(tmp, &pmus, entry) {
> + if (tmp->id == pmu->id) {
> + ret = 0;
> + goto err;
Hi Ming, is it supposed to get out in locked state?
I mean imbalanced rcu_read_lock somehow suspicious ;)
> + }
> + }
> + rcu_read_unlock();
> +
> + spin_lock(&pmus_lock);
> + list_add_tail_rcu(&pmu->entry, &pmus);
> + spin_unlock(&pmus_lock);
> +
> +err:
> + return ret;
> +}
> +
...
-- Cyrill
^ permalink raw reply [flat|nested] 3+ messages in thread* Re: [RFC][PATCH v2 02/11] perf: core, add pmu register and lookup functions
2010-05-18 18:38 ` Cyrill Gorcunov
@ 2010-05-19 2:00 ` Lin Ming
0 siblings, 0 replies; 3+ messages in thread
From: Lin Ming @ 2010-05-19 2:00 UTC (permalink / raw)
To: Cyrill Gorcunov
Cc: Peter Zijlstra, Ingo Molnar, Corey Ashford, Frederic Weisbecker,
Paul Mundt, eranian@gmail.com, Gary.Mohr@Bull.com,
arjan@linux.intel.com, Zhang, Yanmin, Paul Mackerras,
David S. Miller, Russell King, Arnaldo Carvalho de Melo,
Will Deacon, Maynard Johnson, Carl Love, greg@kroah.com,
Kay Sievers, lkml
On Wed, 2010-05-19 at 02:38 +0800, Cyrill Gorcunov wrote:
> On Wed, May 19, 2010 at 01:45:02AM +0000, Lin Ming wrote:
> ...
> > +int perf_event_register_pmu(struct pmu *pmu)
> > +{
> > + struct pmu *tmp;
> > + int ret = 1;
> > +
> > + rcu_read_lock();
> > + list_for_each_entry_rcu(tmp, &pmus, entry) {
> > + if (tmp->id == pmu->id) {
> > + ret = 0;
> > + goto err;
>
> Hi Ming, is it supposed to get out in locked state?
> I mean imbalanced rcu_read_lock somehow suspicious ;)
Ah, yes, that's real bug.
I'll fix it.
Thanks for review.
>
> > + }
> > + }
> > + rcu_read_unlock();
> > +
> > + spin_lock(&pmus_lock);
> > + list_add_tail_rcu(&pmu->entry, &pmus);
> > + spin_unlock(&pmus_lock);
> > +
> > +err:
> > + return ret;
> > +}
> > +
> ...
>
> -- Cyrill
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2010-05-19 2:01 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-05-19 1:45 [RFC][PATCH v2 02/11] perf: core, add pmu register and lookup functions Lin Ming
2010-05-18 18:38 ` Cyrill Gorcunov
2010-05-19 2:00 ` Lin Ming
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox