From: Peter Zijlstra <peterz@infradead.org>
To: kan.liang@linux.intel.com
Cc: mingo@redhat.com, acme@kernel.org, tglx@linutronix.de,
bp@alien8.de, x86@kernel.org, linux-kernel@vger.kernel.org,
mark.rutland@arm.com, alexander.shishkin@linux.intel.com,
jolsa@redhat.com, namhyung@kernel.org, dave.hansen@intel.com,
yu-cheng.yu@intel.com, bigeasy@linutronix.de, gorcunov@gmail.com,
hpa@zytor.com, alexey.budankov@linux.intel.com,
eranian@google.com, ak@linux.intel.com, like.xu@linux.intel.com,
yao.jin@linux.intel.com
Subject: Re: [PATCH 12/21] perf/x86/intel/lbr: Support Architectural LBR
Date: Fri, 19 Jun 2020 21:08:23 +0200 [thread overview]
Message-ID: <20200619190823.GG576888@hirez.programming.kicks-ass.net> (raw)
In-Reply-To: <1592575449-64278-13-git-send-email-kan.liang@linux.intel.com>
On Fri, Jun 19, 2020 at 07:04:00AM -0700, kan.liang@linux.intel.com wrote:
> +static void intel_pmu_arch_lbr_enable(bool pmi)
> +{
> + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
> + u64 debugctl, lbr_ctl = 0, orig_debugctl;
> +
> + if (pmi)
> + return;
> +
> + if (cpuc->lbr_ctl)
> + lbr_ctl = cpuc->lbr_ctl->config & x86_pmu.lbr_ctl_mask;
> + /*
> + * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
> + * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
> + * may be missed, which can lead to confusing results.
> + */
> + rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
> + orig_debugctl = debugctl;
> + if (lbr_ctl & ARCH_LBR_CALL_STACK)
> + debugctl &= ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
> + else
> + debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
> + if (orig_debugctl != debugctl)
> + wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
> +
> + wrmsrl(MSR_ARCH_LBR_CTL, lbr_ctl | ARCH_LBR_CTL_LBREN);
> +}
This is nearly a duplicate of the old one, surely we can do better?
> +static void intel_pmu_arch_lbr_restore(void *ctx)
> +{
> + struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
> + struct x86_perf_arch_lbr_entry *entries = task_ctx->entries;
> + int i;
> +
> + /* Fast reset the LBRs before restore if the call stack is not full. */
> + if (!entries[x86_pmu.lbr_nr - 1].lbr_from)
> + intel_pmu_arch_lbr_reset();
> +
> + for (i = 0; i < x86_pmu.lbr_nr; i++) {
> + if (!entries[i].lbr_from)
> + break;
> + wrlbr_from(i, entries[i].lbr_from);
> + wrlbr_to(i, entries[i].lbr_to);
> + wrmsrl(MSR_ARCH_LBR_INFO_0 + i, entries[i].lbr_info);
> + }
> +}
This too looks very much like the old one.
> +static void intel_pmu_arch_lbr_save(void *ctx)
> +{
> + struct x86_perf_task_context_arch_lbr *task_ctx = ctx;
> + struct x86_perf_arch_lbr_entry *entries = task_ctx->entries;
> + int i;
> +
> + for (i = 0; i < x86_pmu.lbr_nr; i++) {
> + entries[i].lbr_from = rdlbr_from(i);
> + /* Only save valid branches. */
> + if (!entries[i].lbr_from)
> + break;
> + entries[i].lbr_to = rdlbr_to(i);
> + rdmsrl(MSR_ARCH_LBR_INFO_0 + i, entries[i].lbr_info);
> + }
> +
> + /* LBR call stack is not full. Reset is required in restore. */
> + if (i < x86_pmu.lbr_nr)
> + entries[x86_pmu.lbr_nr - 1].lbr_from = 0;
> +}
And again..
> +static void __intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc, int index,
> + u64 from, u64 to, u64 info)
> +{
> + u64 mis = 0, pred = 0, in_tx = 0, abort = 0, type = 0;
> + u32 br_type, to_plm;
> + u16 cycles = 0;
> +
> + if (x86_pmu.arch_lbr_mispred) {
> + mis = !!(info & ARCH_LBR_INFO_MISPRED);
> + pred = !mis;
> + }
> + in_tx = !!(info & ARCH_LBR_INFO_IN_TSX);
> + abort = !!(info & ARCH_LBR_INFO_TSX_ABORT);
> + if (x86_pmu.arch_lbr_timed_lbr &&
> + (info & ARCH_LBR_INFO_CYC_CNT_VALID))
> + cycles = (info & ARCH_LBR_INFO_CYC_CNT);
> +
> + /*
> + * Parse the branch type recorded in LBR_x_INFO MSR.
> + * Doesn't support OTHER_BRANCH decoding for now.
> + * The OTHER_BRANCH branch type still relies on software decoding.
> + */
> + if (x86_pmu.arch_lbr_br_type) {
> + br_type = (info & ARCH_LBR_INFO_BR_TYPE) >> ARCH_LBR_INFO_BR_TYPE_OFFSET;
> +
> + if (br_type <= ARCH_LBR_BR_TYPE_KNOWN_MAX) {
> + to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
> + type = arch_lbr_br_type_map[br_type] | to_plm;
> + }
> + }
> +
> + cpuc->lbr_entries[index].from = from;
> + cpuc->lbr_entries[index].to = to;
> + cpuc->lbr_entries[index].mispred = mis;
> + cpuc->lbr_entries[index].predicted = pred;
> + cpuc->lbr_entries[index].in_tx = in_tx;
> + cpuc->lbr_entries[index].abort = abort;
> + cpuc->lbr_entries[index].cycles = cycles;
> + cpuc->lbr_entries[index].type = type;
> + cpuc->lbr_entries[index].reserved = 0;
> +}
> +
> +static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc)
> +{
> + u64 from, to, info;
> + int i;
> +
> + for (i = 0; i < x86_pmu.lbr_nr; i++) {
> + from = rdlbr_from(i);
> + to = rdlbr_to(i);
> +
> + /*
> + * Read LBR entries until invalid entry (0s) is detected.
> + */
> + if (!from)
> + break;
> +
> + rdmsrl(MSR_ARCH_LBR_INFO_0 + i, info);
> +
> + __intel_pmu_arch_lbr_read(cpuc, i, from, to, info);
> + }
> +
> + cpuc->lbr_stack.nr = i;
> +}
> +static void intel_pmu_store_pebs_arch_lbrs(struct pebs_lbr *pebs_lbr,
> + struct cpu_hw_events *cpuc)
> +{
> + struct pebs_lbr_entry *lbr;
> + int i;
> +
> + for (i = 0; i < x86_pmu.lbr_nr; i++) {
> + lbr = &pebs_lbr->lbr[i];
> +
> + /*
> + * Read LBR entries until invalid entry (0s) is detected.
> + */
> + if (!lbr->from)
> + break;
> +
> + __intel_pmu_arch_lbr_read(cpuc, i, lbr->from,
> + lbr->to, lbr->info);
> + }
> +
> + cpuc->lbr_stack.nr = i;
> + intel_pmu_lbr_filter(cpuc);
> +}
Unless I'm reading cross-eyed again, that too is very similar to what we
already had.
next prev parent reply other threads:[~2020-06-19 19:08 UTC|newest]
Thread overview: 40+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-06-19 14:03 [PATCH 00/21] Support Architectural LBR kan.liang
2020-06-19 14:03 ` [PATCH 01/21] x86/cpufeatures: Add Architectural LBRs feature bit kan.liang
2020-06-19 14:03 ` [PATCH 02/21] perf/x86/intel/lbr: Add pointers for LBR enable and disable kan.liang
2020-06-19 14:03 ` [PATCH 03/21] perf/x86/intel/lbr: Add pointer for LBR reset kan.liang
2020-06-19 14:03 ` [PATCH 04/21] perf/x86/intel/lbr: Add pointer for LBR read kan.liang
2020-06-19 14:03 ` [PATCH 05/21] perf/x86/intel/lbr: Add pointers for LBR save and restore kan.liang
2020-06-19 14:03 ` [PATCH 06/21] perf/x86/intel/lbr: Factor out a new struct for generic optimization kan.liang
2020-06-19 14:03 ` [PATCH 07/21] perf/x86/intel/lbr: Use dynamic data structure for task_ctx kan.liang
2020-06-19 14:03 ` [PATCH 08/21] x86/msr-index: Add bunch of MSRs for Arch LBR kan.liang
2020-06-19 19:11 ` Peter Zijlstra
2020-06-19 14:03 ` [PATCH 09/21] perf/x86: Expose CPUID enumeration bits for arch LBR kan.liang
2020-06-19 18:31 ` Peter Zijlstra
2020-06-19 14:03 ` [PATCH 10/21] perf/x86/intel: Check Arch LBR MSRs kan.liang
2020-06-19 14:03 ` [PATCH 11/21] perf/x86/intel/lbr: Support LBR_CTL kan.liang
2020-06-19 18:40 ` Peter Zijlstra
2020-06-19 19:15 ` Liang, Kan
2020-06-19 19:22 ` Peter Zijlstra
2020-06-19 14:04 ` [PATCH 12/21] perf/x86/intel/lbr: Support Architectural LBR kan.liang
2020-06-19 19:08 ` Peter Zijlstra [this message]
2020-06-19 19:40 ` Liang, Kan
2020-06-19 14:04 ` [PATCH 13/21] perf/core: Factor out functions to allocate/free the task_ctx_data kan.liang
2020-06-19 14:04 ` [PATCH 14/21] perf/core: Use kmem_cache to allocate the PMU specific data kan.liang
2020-06-19 14:04 ` [PATCH 15/21] perf/x86/intel/lbr: Create kmem_cache for the LBR context data kan.liang
2020-06-19 14:04 ` [PATCH 16/21] perf/x86: Remove task_ctx_size kan.liang
2020-06-19 14:04 ` [PATCH 17/21] x86/fpu: Use proper mask to replace full instruction mask kan.liang
2020-06-19 19:31 ` Peter Zijlstra
2020-06-22 14:52 ` Liang, Kan
2020-06-22 15:02 ` Dave Hansen
2020-06-22 17:47 ` Liang, Kan
2020-06-22 18:05 ` Dave Hansen
2020-06-22 18:46 ` Liang, Kan
2020-06-19 14:04 ` [PATCH 18/21] x86/fpu/xstate: Support dynamic supervisor feature for LBR kan.liang
2020-06-19 14:04 ` [PATCH 19/21] x86/fpu/xstate: Add helpers for LBR dynamic supervisor feature kan.liang
2020-06-19 14:04 ` [PATCH 20/21] perf/x86/intel/lbr: Support XSAVES/XRSTORS for LBR context switch kan.liang
2020-06-19 19:41 ` Peter Zijlstra
2020-06-19 22:28 ` Liang, Kan
2020-06-19 14:04 ` [PATCH 21/21] perf/x86/intel/lbr: Support XSAVES for arch LBR read kan.liang
2020-06-22 18:49 ` Cyrill Gorcunov
2020-06-22 19:11 ` Liang, Kan
2020-06-22 19:31 ` Cyrill Gorcunov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200619190823.GG576888@hirez.programming.kicks-ass.net \
--to=peterz@infradead.org \
--cc=acme@kernel.org \
--cc=ak@linux.intel.com \
--cc=alexander.shishkin@linux.intel.com \
--cc=alexey.budankov@linux.intel.com \
--cc=bigeasy@linutronix.de \
--cc=bp@alien8.de \
--cc=dave.hansen@intel.com \
--cc=eranian@google.com \
--cc=gorcunov@gmail.com \
--cc=hpa@zytor.com \
--cc=jolsa@redhat.com \
--cc=kan.liang@linux.intel.com \
--cc=like.xu@linux.intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mark.rutland@arm.com \
--cc=mingo@redhat.com \
--cc=namhyung@kernel.org \
--cc=tglx@linutronix.de \
--cc=x86@kernel.org \
--cc=yao.jin@linux.intel.com \
--cc=yu-cheng.yu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox