public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Peter Zijlstra <peterz@infradead.org>
To: kan.liang@linux.intel.com
Cc: mingo@redhat.com, acme@kernel.org, namhyung@kernel.org,
	irogers@google.com, adrian.hunter@intel.com,
	linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
	ak@linux.intel.com, eranian@google.com,
	dapeng1.mi@linux.intel.com
Subject: Re: [PATCH V9 3/3] perf/x86/intel: Support PEBS counters snapshotting
Date: Thu, 16 Jan 2025 13:02:03 +0100	[thread overview]
Message-ID: <20250116120203.GK8362@noisy.programming.kicks-ass.net> (raw)
In-Reply-To: <20250115184318.2854459-3-kan.liang@linux.intel.com>

On Wed, Jan 15, 2025 at 10:43:18AM -0800, kan.liang@linux.intel.com wrote:

> diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
> index 81b6ec8e824e..10ce80230ad3 100644
> --- a/arch/x86/events/intel/ds.c
> +++ b/arch/x86/events/intel/ds.c
> @@ -1294,6 +1294,19 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
>  	ds->pebs_interrupt_threshold = threshold;
>  }
>  
> +#define PEBS_DATACFG_CNTRS(x)						\
> +	((x >> PEBS_DATACFG_CNTR_SHIFT) & PEBS_DATACFG_CNTR_MASK)
> +
> +#define PEBS_DATACFG_CNTR_BIT(x)					\
> +	(((1ULL << x) & PEBS_DATACFG_CNTR_MASK) << PEBS_DATACFG_CNTR_SHIFT)
> +
> +#define PEBS_DATACFG_FIX(x)						\
> +	((x >> PEBS_DATACFG_FIX_SHIFT) & PEBS_DATACFG_FIX_MASK)
> +
> +#define PEBS_DATACFG_FIX_BIT(x)						\
> +	(((1ULL << (x - INTEL_PMC_IDX_FIXED)) & PEBS_DATACFG_FIX_MASK)	\
> +	 << PEBS_DATACFG_FIX_SHIFT)

That INTEL_PMC_IDX_FIXED does not belong here, needs to be at the
callsite.


> @@ -1914,6 +1975,34 @@ static void adaptive_pebs_save_regs(struct pt_regs *regs,
>  #endif
>  }
>  
> +static void intel_perf_event_pmc_to_count(struct perf_event *event, u64 pmc)
> +{
> +	int shift = 64 - x86_pmu.cntval_bits;
> +	struct hw_perf_event *hwc;
> +	u64 delta, prev_pmc;
> +
> +	/*
> +	 * The PEBS record doesn't shrink on pmu::del().
> +	 * See pebs_update_state().
> +	 * Ignore the non-exist event.
> +	 */
> +	if (!event)
> +		return;
> +
> +	hwc = &event->hw;
> +	prev_pmc = local64_read(&hwc->prev_count);
> +
> +	/* Only update the count when the PMU is disabled */
> +	WARN_ON(this_cpu_read(cpu_hw_events.enabled));
> +	local64_set(&hwc->prev_count, pmc);
> +
> +	delta = (pmc << shift) - (prev_pmc << shift);
> +	delta >>= shift;
> +
> +	local64_add(delta, &event->count);
> +	local64_sub(delta, &hwc->period_left);
> +}

Name and function don't really match at this point. When I suggested
this function it returned a count value, now it's closer to
intel_perf_event_update_pmc() or somesuch.



> @@ -2049,6 +2138,61 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
>  		}
>  	}
>  
> +	if (format_group & (PEBS_DATACFG_CNTR | PEBS_DATACFG_METRICS)) {
> +		struct pebs_cntr_header *cntr = next_record;
> +		int bit;
> +
> +		next_record += sizeof(struct pebs_cntr_header);
> +
> +		/*
> +		 * The PEBS_DATA_CFG is a global register, which is the
> +		 * superset configuration for all PEBS events.
> +		 * For the PEBS record of non-sample-read group, ignore
> +		 * the counter snapshot fields.
> +		 */
> +		if (!is_pebs_counter_event_group(event)) {
> +			unsigned int nr;
> +
> +			nr = bitmap_weight((unsigned long *)&cntr->cntr, INTEL_PMC_MAX_GENERIC) +
> +			     bitmap_weight((unsigned long *)&cntr->fixed, INTEL_PMC_MAX_FIXED);

hweight32 ?

> +			if (cntr->metrics == INTEL_CNTR_METRICS)
> +				nr += 2;
> +			next_record += nr * sizeof(u64);
> +			goto end_cntr;

Yuck, can't you break out stuff into a helper function to get rid of
that goto?

> +		}
> +
> +		for_each_set_bit(bit, (unsigned long *)&cntr->cntr, INTEL_PMC_MAX_GENERIC) {
> +			intel_perf_event_pmc_to_count(cpuc->events[bit], *(u64 *)next_record);
> +			next_record += sizeof(u64);
> +		}
> +
> +		for_each_set_bit(bit, (unsigned long *)&cntr->fixed, INTEL_PMC_MAX_FIXED) {
> +			/* The slots event will be handled with perf_metric later */
> +			if ((cntr->metrics == INTEL_CNTR_METRICS) &&
> +			    (bit + INTEL_PMC_IDX_FIXED == INTEL_PMC_IDX_FIXED_SLOTS)) {
> +				next_record += sizeof(u64);
> +				continue;
> +			}
> +			intel_perf_event_pmc_to_count(cpuc->events[bit + INTEL_PMC_IDX_FIXED],
> +						      *(u64 *)next_record);
> +			next_record += sizeof(u64);
> +		}
> +
> +		/* HW will reload the value right after the overflow. */
> +		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
> +			local64_set(&event->hw.prev_count, (u64)-event->hw.sample_period);
> +
> +		if (cntr->metrics == INTEL_CNTR_METRICS) {
> +			static_call(intel_pmu_update_topdown_event)
> +				   (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS],
> +				    (u64 *)next_record);
> +			next_record += 2 * sizeof(u64);
> +		}
> +		data->sample_flags |= PERF_SAMPLE_READ;
> +	}
> +
> +end_cntr:
> +
>  	WARN_ONCE(next_record != __pebs + basic->format_size,
>  			"PEBS record size %u, expected %llu, config %llx\n",
>  			basic->format_size,

  parent reply	other threads:[~2025-01-16 12:02 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-01-15 18:43 [PATCH V9 1/3] perf/x86/intel: Avoid pmu_disable/enable if !cpuc->enabled in sample read kan.liang
2025-01-15 18:43 ` [PATCH V9 2/3] perf: Avoid the read if the count is already updated kan.liang
2025-01-15 18:43 ` [PATCH V9 3/3] perf/x86/intel: Support PEBS counters snapshotting kan.liang
2025-01-16 11:47   ` Peter Zijlstra
2025-01-16 15:55     ` Liang, Kan
2025-01-16 20:42       ` Peter Zijlstra
2025-01-16 20:56         ` Peter Zijlstra
2025-01-16 21:50           ` Liang, Kan
2025-01-21 15:25             ` Liang, Kan
2025-01-23  9:14             ` Peter Zijlstra
2025-01-23 15:36               ` Liang, Kan
2025-01-16 12:02   ` Peter Zijlstra [this message]
2025-01-16 10:32 ` [PATCH V9 1/3] perf/x86/intel: Avoid pmu_disable/enable if !cpuc->enabled in sample read Peter Zijlstra
2025-01-16 10:51   ` Peter Zijlstra

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250116120203.GK8362@noisy.programming.kicks-ass.net \
    --to=peterz@infradead.org \
    --cc=acme@kernel.org \
    --cc=adrian.hunter@intel.com \
    --cc=ak@linux.intel.com \
    --cc=dapeng1.mi@linux.intel.com \
    --cc=eranian@google.com \
    --cc=irogers@google.com \
    --cc=kan.liang@linux.intel.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-perf-users@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=namhyung@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox