From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from foss.arm.com ([217.140.101.70]:47904 "EHLO foss.arm.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1753671AbcBAMcV (ORCPT ); Mon, 1 Feb 2016 07:32:21 -0500 Date: Mon, 1 Feb 2016 12:34:13 +0000 From: Lorenzo Pieralisi To: gregkh@linuxfoundation.org Cc: linux@roeck-us.net, mark.rutland@arm.com, peter.maydell@linaro.org, stable@vger.kernel.org, will.deacon@arm.com Subject: Re: FAILED: patch "[PATCH] arm64: kernel: fix architected PMU registers unconditional" failed to apply to 4.1-stable tree Message-ID: <20160201123413.GA10764@red-moon> References: <1454267851666@kroah.com> MIME-Version: 1.0 Content-Type: text/plain; charset=us-ascii Content-Disposition: inline In-Reply-To: <1454267851666@kroah.com> Sender: stable-owner@vger.kernel.org List-ID: Hi Greg, On Sun, Jan 31, 2016 at 11:17:31AM -0800, gregkh@linuxfoundation.org wrote: > > The patch below does not apply to the 4.1-stable tree. > If someone wants it applied there, or to any other stable or longterm > tree, then please email the backport, including the original git commit > id to . I will also backport a dependency and send them to stable as a series. What's the standard mechanism for tagging backports (ie how do you prefer me stating which stable kernel version the series should be applied to - eg v4.1.17 ?) Usual Cc: tag or a line in the commit log would do ? 
Thanks, Lorenzo > > thanks, > > greg k-h > > ------------------ original commit in Linus's tree ------------------ > > From f436b2ac90a095746beb6729b8ee8ed87c9eaede Mon Sep 17 00:00:00 2001 > From: Lorenzo Pieralisi > Date: Wed, 13 Jan 2016 14:50:03 +0000 > Subject: [PATCH] arm64: kernel: fix architected PMU registers unconditional > access > > The Performance Monitors extension is an optional feature of the > AArch64 architecture, therefore, in order to access Performance > Monitors registers safely, the kernel should detect the architected > PMU unit presence through the ID_AA64DFR0_EL1 register PMUVer field > before accessing them. > > This patch implements a guard by reading the ID_AA64DFR0_EL1 register > PMUVer field to detect the architected PMU presence and prevent accessing > PMU system registers if the Performance Monitors extension is not > implemented in the core. > > Cc: Peter Maydell > Cc: Mark Rutland > Cc: > Fixes: 60792ad349f3 ("arm64: kernel: enforce pmuserenr_el0 initialization and restore") > Signed-off-by: Lorenzo Pieralisi > Reported-by: Guenter Roeck > Tested-by: Guenter Roeck > Signed-off-by: Will Deacon > > diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S > index ffe9c2b6431b..917d98108b3f 100644 > --- a/arch/arm64/kernel/head.S > +++ b/arch/arm64/kernel/head.S > @@ -514,9 +514,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems > #endif > > /* EL2 debug */ > + mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer > + sbfx x0, x0, #8, #4 > + cmp x0, #1 > + b.lt 4f // Skip if no PMU present > mrs x0, pmcr_el0 // Disable debug access traps > ubfx x0, x0, #11, #5 // to EL2 and allow access to > msr mdcr_el2, x0 // all PMU counters from EL1 > +4: > > /* Stage-2 translation */ > msr vttbr_el2, xzr > diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S > index 146bd99a7532..e6a30e1268a8 100644 > --- a/arch/arm64/mm/proc-macros.S > +++ b/arch/arm64/mm/proc-macros.S > @@ -84,3 +84,15 @@ > 
b.lo 9998b > dsb \domain > .endm > + > +/* > + * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present > + */ > + .macro reset_pmuserenr_el0, tmpreg > + mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer > + sbfx \tmpreg, \tmpreg, #8, #4 > + cmp \tmpreg, #1 // Skip if no PMU present > + b.lt 9000f > + msr pmuserenr_el0, xzr // Disable PMU access from EL0 > +9000: > + .endm > diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S > index a3d867e723b4..c164d2cb35c0 100644 > --- a/arch/arm64/mm/proc.S > +++ b/arch/arm64/mm/proc.S > @@ -117,7 +117,7 @@ ENTRY(cpu_do_resume) > */ > ubfx x11, x11, #1, #1 > msr oslar_el1, x11 > - msr pmuserenr_el0, xzr // Disable PMU access from EL0 > + reset_pmuserenr_el0 x0 // Disable PMU access from EL0 > mov x0, x12 > dsb nsh // Make sure local tlb invalidation completed > isb > @@ -154,7 +154,7 @@ ENTRY(__cpu_setup) > msr cpacr_el1, x0 // Enable FP/ASIMD > mov x0, #1 << 12 // Reset mdscr_el1 and disable > msr mdscr_el1, x0 // access to the DCC from EL0 > - msr pmuserenr_el0, xzr // Disable PMU access from EL0 > + reset_pmuserenr_el0 x0 // Disable PMU access from EL0 > /* > * Memory region attributes for LPAE: > * >