From: Namhyung Kim <namhyung@kernel.org>
To: Ian Rogers <irogers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>,
	Arnaldo Carvalho de Melo <acme@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@kernel.org>,
	Adrian Hunter <adrian.hunter@intel.com>,
	Kan Liang <kan.liang@linux.intel.com>,
	Weilin Wang <weilin.wang@intel.com>,
	James Clark <james.clark@linaro.org>, Xu Yang <xu.yang_2@nxp.com>,
	John Garry <john.g.garry@oracle.com>,
	Howard Chu <howardchu95@gmail.com>,
	Levi Yun <yeoreum.yun@arm.com>,
	Dominique Martinet <asmadeus@codewreck.org>,
	linux-perf-users@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v4 11/16] perf intel-tpebs: Add mutex for tpebs_results
Date: Fri, 11 Apr 2025 15:54:53 -0700
Message-ID: <Z_mdvahT2i6djhj1@z2>
In-Reply-To: <20250409061043.700792-12-irogers@google.com>

On Tue, Apr 08, 2025 at 11:10:38PM -0700, Ian Rogers wrote:
> Ensure sample reader isn't racing with events being added/removed.
> 
> Signed-off-by: Ian Rogers <irogers@google.com>
> ---
>  tools/perf/util/intel-tpebs.c | 51 ++++++++++++++++++++++++++++++-----
>  1 file changed, 44 insertions(+), 7 deletions(-)
> 
> diff --git a/tools/perf/util/intel-tpebs.c b/tools/perf/util/intel-tpebs.c
> index f648fca17556..c5ccdbc42dc6 100644
> --- a/tools/perf/util/intel-tpebs.c
> +++ b/tools/perf/util/intel-tpebs.c
> @@ -16,6 +16,7 @@
>  #include "debug.h"
>  #include "evlist.h"
>  #include "evsel.h"
> +#include "mutex.h"
>  #include "session.h"
>  #include "tool.h"
>  #include "cpumap.h"
> @@ -32,6 +33,7 @@ bool tpebs_recording;
>  static LIST_HEAD(tpebs_results);
>  static pthread_t tpebs_reader_thread;
>  static struct child_process tpebs_cmd;
> +static struct mutex tpebs_mtx;
>  
>  struct tpebs_retire_lat {
>  	struct list_head nd;
> @@ -51,6 +53,19 @@ struct tpebs_retire_lat {
>  
>  static struct tpebs_retire_lat *tpebs_retire_lat__find(struct evsel *evsel);
>  
> +static void tpebs_mtx_init(void)
> +{
> +	mutex_init(&tpebs_mtx);
> +}
> +
> +static struct mutex *tpebs_mtx_get(void)
> +{
> +	static pthread_once_t tpebs_mtx_once = PTHREAD_ONCE_INIT;
> +
> +	pthread_once(&tpebs_mtx_once, tpebs_mtx_init);
> +	return &tpebs_mtx;
> +}
> +
>  static int evsel__tpebs_start_perf_record(struct evsel *evsel, int control_fd[], int ack_fd[])
>  {
>  	const char **record_argv;
> @@ -59,13 +74,15 @@ static int evsel__tpebs_start_perf_record(struct evsel *evsel, int control_fd[],
>  	char cpumap_buf[50];
>  	struct tpebs_retire_lat *t;
>  
> +	mutex_lock(tpebs_mtx_get());
>  	list_for_each_entry(t, &tpebs_results, nd)
>  		tpebs_event_size++;
>  
>  	record_argv = malloc((10 + 2 * tpebs_event_size) * sizeof(*record_argv));
> -	if (!record_argv)
> +	if (!record_argv) {
> +		mutex_unlock(tpebs_mtx_get());
>  		return -ENOMEM;
> -
> +	}
>  	record_argv[i++] = "perf";
>  	record_argv[i++] = "record";
>  	record_argv[i++] = "-W";
> @@ -101,6 +118,7 @@ static int evsel__tpebs_start_perf_record(struct evsel *evsel, int control_fd[],
>  	list_for_each_entry(t, &tpebs_results, nd)
>  		t->started = true;
>  
> +	mutex_unlock(tpebs_mtx_get());
>  	return ret;
>  }
>  
> @@ -112,9 +130,12 @@ static int process_sample_event(const struct perf_tool *tool __maybe_unused,
>  {
>  	struct tpebs_retire_lat *t;
>  
> +	mutex_lock(tpebs_mtx_get());
>  	t = tpebs_retire_lat__find(evsel);
> -	if (!t)
> +	if (!t) {
> +		mutex_unlock(tpebs_mtx_get());
>  		return -EINVAL;
> +	}
>  	/*
>  	 * Need to handle per core results? We are assuming average retire
>  	 * latency value will be used. Save the number of samples and the sum of
> @@ -123,6 +144,7 @@ static int process_sample_event(const struct perf_tool *tool __maybe_unused,
>  	t->count += 1;
>  	t->sum += sample->retire_lat;
>  	t->val = (double) t->sum / t->count;
> +	mutex_unlock(tpebs_mtx_get());
>  	return 0;
>  }
>  
> @@ -229,7 +251,6 @@ static struct tpebs_retire_lat *tpebs_retire_lat__new(struct evsel *evsel)
>  		return NULL;
>  	}
>  	result->evsel = evsel;
> -	list_add_tail(&result->nd, &tpebs_results);
>  	return result;
>  }
>  
> @@ -282,16 +303,22 @@ static struct tpebs_retire_lat *tpebs_retire_lat__find(struct evsel *evsel)
>  static int evsel__tpebs_prepare(struct evsel *evsel)
>  {
>  	struct evsel *pos;
> -	struct tpebs_retire_lat *tpebs_event = tpebs_retire_lat__find(evsel);
> +	struct tpebs_retire_lat *tpebs_event;
>  
> +	mutex_lock(tpebs_mtx_get());
> +	tpebs_event = tpebs_retire_lat__find(evsel);
>  	if (tpebs_event) {
>  		/* evsel, or an identically named one, was already prepared. */
> +		mutex_unlock(tpebs_mtx_get());
>  		return 0;
>  	}
>  	tpebs_event = tpebs_retire_lat__new(evsel);
>  	if (!tpebs_event)
>  		return -ENOMEM;

The mutex_unlock() is missing here: the early return when
tpebs_retire_lat__new() fails leaves tpebs_mtx held.
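
For example, the error path could drop the lock before returning,
something like this minimal sketch (untested, and only using the calls
the patch already introduces):

	tpebs_event = tpebs_retire_lat__new(evsel);
	if (!tpebs_event) {
		mutex_unlock(tpebs_mtx_get());
		return -ENOMEM;
	}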

Thanks,
Namhyung

>  
> +	list_add_tail(&tpebs_event->nd, &tpebs_results);
> +	mutex_unlock(tpebs_mtx_get());
> +
>  	/*
>  	 * Eagerly prepare all other evsels on the list to try to ensure that by
>  	 * open they are all known.
> @@ -317,6 +344,7 @@ static int evsel__tpebs_prepare(struct evsel *evsel)
>  int evsel__tpebs_open(struct evsel *evsel)
>  {
>  	int ret;
> +	bool tpebs_empty;
>  
>  	/* We should only run tpebs_start when tpebs_recording is enabled. */
>  	if (!tpebs_recording)
> @@ -336,7 +364,10 @@ int evsel__tpebs_open(struct evsel *evsel)
>  	if (ret)
>  		return ret;
>  
> -	if (!list_empty(&tpebs_results)) {
> +	mutex_lock(tpebs_mtx_get());
> +	tpebs_empty = list_empty(&tpebs_results);
> +	mutex_unlock(tpebs_mtx_get());
> +	if (!tpebs_empty) {
>  		struct pollfd pollfd = { .events = POLLIN, };
>  		int control_fd[2], ack_fd[2], len;
>  		char ack_buf[8];
> @@ -436,8 +467,10 @@ int evsel__tpebs_read(struct evsel *evsel, int cpu_map_idx, int thread)
>  	 */
>  	tpebs_stop();
>  
> +	mutex_lock(tpebs_mtx_get());
>  	t = tpebs_retire_lat__find(evsel);
>  	val = rint(t->val);
> +	mutex_unlock(tpebs_mtx_get());
>  
>  	if (old_count) {
>  		count->val = old_count->val + val;
> @@ -460,9 +493,13 @@ int evsel__tpebs_read(struct evsel *evsel, int cpu_map_idx, int thread)
>   */
>  void evsel__tpebs_close(struct evsel *evsel)
>  {
> -	struct tpebs_retire_lat *t = tpebs_retire_lat__find(evsel);
> +	struct tpebs_retire_lat *t;
>  
> +	mutex_lock(tpebs_mtx_get());
> +	t = tpebs_retire_lat__find(evsel);
> +	list_del_init(&t->nd);
>  	tpebs_retire_lat__delete(t);
> +	mutex_unlock(tpebs_mtx_get());
>  
>  	if (list_empty(&tpebs_results))
>  		tpebs_stop();
> -- 
> 2.49.0.504.g3bcea36a83-goog
> 

Thread overview: 21+ messages
2025-04-09  6:10 [PATCH v4 00/16] Intel TPEBS min/max/mean/last support Ian Rogers
2025-04-09  6:10 ` [PATCH v4 01/16] perf intel-tpebs: Cleanup header Ian Rogers
2025-04-09  6:10 ` [PATCH v4 02/16] perf intel-tpebs: Simplify tpebs_cmd Ian Rogers
2025-04-09  6:10 ` [PATCH v4 03/16] perf intel-tpebs: Rename tpebs_start to evsel__tpebs_open Ian Rogers
2025-04-09  6:10 ` [PATCH v4 04/16] perf intel-tpebs: Separate evsel__tpebs_prepare out of evsel__tpebs_open Ian Rogers
2025-04-09  6:10 ` [PATCH v4 05/16] perf intel-tpebs: Move cpumap_buf " Ian Rogers
2025-04-09  6:10 ` [PATCH v4 06/16] perf intel-tpebs: Reduce scope of tpebs_events_size Ian Rogers
2025-04-09  6:10 ` [PATCH v4 07/16] perf intel-tpebs: Inline get_perf_record_args Ian Rogers
2025-04-09  6:10 ` [PATCH v4 08/16] perf intel-tpebs: Ensure events are opened, factor out finding Ian Rogers
2025-04-09  6:10 ` [PATCH v4 09/16] perf intel-tpebs: Refactor tpebs_results list Ian Rogers
2025-04-09  6:10 ` [PATCH v4 10/16] perf intel-tpebs: Add support for updating counts in evsel__tpebs_read Ian Rogers
2025-04-09  6:10 ` [PATCH v4 11/16] perf intel-tpebs: Add mutex for tpebs_results Ian Rogers
2025-04-11 22:54   ` Namhyung Kim [this message]
2025-04-14 17:00     ` Ian Rogers
2025-04-09  6:10 ` [PATCH v4 12/16] perf intel-tpebs: Don't close record on read Ian Rogers
2025-04-09  6:10 ` [PATCH v4 13/16] perf intel-tpebs: Use stats for retirement latency statistics Ian Rogers
2025-04-09  6:10 ` [PATCH v4 14/16] perf stat: Add mean, min, max and last --tpebs-mode options Ian Rogers
2025-04-09  6:10 ` [PATCH v4 15/16] perf pmu-events: Add retirement latency to JSON events inside of perf Ian Rogers
2025-04-09  6:10 ` [PATCH v4 16/16] perf record: Retirement latency cleanup in evsel__config Ian Rogers
2025-04-10  3:12   ` Wang, Weilin
2025-04-11 23:09 ` [PATCH v4 00/16] Intel TPEBS min/max/mean/last support Namhyung Kim
