Date: Thu, 1 Oct 2015 08:10:26 +0200
From: minipli@ld-linux.so
To: Josh Poimboeuf
Cc: Thomas Gleixner, Ingo Molnar, "H. Peter Anvin", x86@kernel.org,
    linux-kernel@vger.kernel.org, live-patching@vger.kernel.org,
    Michal Marek, Peter Zijlstra, Andy Lutomirski, Borislav Petkov,
    Linus Torvalds, Andi Kleen, Pedro Alves, Namhyung Kim,
    Bernd Petrovitsch, Chris J Arges, Andrew Morton, Herbert Xu,
    "David S. Miller"
Subject: Re: [PATCH v13 13/23] x86/asm/crypto: Create stack frames in aesni-intel_asm.S
Message-ID: <20151001061026.GA20908@ld-linux.so>

On Tue, Sep 22, 2015 at 10:47:04AM -0500, Josh Poimboeuf wrote:
> aesni-intel_asm.S has several callable non-leaf functions which don't
> honor CONFIG_FRAME_POINTER, which can result in bad stack traces.
>
> Create stack frames for them when CONFIG_FRAME_POINTER is enabled.
>
> Signed-off-by: Josh Poimboeuf
> Cc: Herbert Xu
> Cc: David S. Miller
> ---
>  arch/x86/crypto/aesni-intel_asm.S | 19 +++++++++++++++++++
>  1 file changed, 19 insertions(+)
>
> diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
> index 6bd2c6c..289ef12 100644
> --- a/arch/x86/crypto/aesni-intel_asm.S
> +++ b/arch/x86/crypto/aesni-intel_asm.S
> @@ -31,6 +31,7 @@
>
>  #include <linux/linkage.h>
>  #include <asm/inst.h>
> +#include <asm/frame.h>
>
>  /*
>   * The following macros are used to move an (un)aligned 16 byte value to/from
> @@ -1800,6 +1801,7 @@ ENDPROC(_key_expansion_256b)
>   *                   unsigned int key_len)
>   */
>  ENTRY(aesni_set_key)
> +	FRAME_BEGIN
>  #ifndef __x86_64__
>  	pushl KEYP
>  	movl 8(%esp), KEYP		# ctx

This will break 32-bit builds using the aesni-intel.ko module. You need
to adjust the %esp-based offsets for the non-x86_64 case, as FRAME_BEGIN
may do another push.

How about adding a FRAME_OFFSET() macro to <asm/frame.h> to wrap the
offsets?:

#ifdef CONFIG_FRAME_POINTER
# define FRAME_OFFSET(x) ((x) + (BITS_PER_LONG / 8))
#else
# define FRAME_OFFSET(x) (x)
#endif

And using it like this:

	movl FRAME_OFFSET(8)(%esp), KEYP	# ctx

> @@ -1905,6 +1907,7 @@ ENTRY(aesni_set_key)
>  #ifndef __x86_64__
>  	popl KEYP
>  #endif
> +	FRAME_END
>  	ret
>  ENDPROC(aesni_set_key)
>
> @@ -1912,6 +1915,7 @@ ENDPROC(aesni_set_key)
>   * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
>   */
>  ENTRY(aesni_enc)
> +	FRAME_BEGIN
>  #ifndef __x86_64__
>  	pushl KEYP
>  	pushl KLEN
> @@ -1927,6 +1931,7 @@ ENTRY(aesni_enc)
>  	popl KLEN
>  	popl KEYP
>  #endif
> +	FRAME_END
>  	ret
>  ENDPROC(aesni_enc)

Here, too.

> @@ -2101,6 +2106,7 @@ ENDPROC(_aesni_enc4)
>   * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
>   */
>  ENTRY(aesni_dec)
> +	FRAME_BEGIN
>  #ifndef __x86_64__
>  	pushl KEYP
>  	pushl KLEN
> @@ -2117,6 +2123,7 @@ ENTRY(aesni_dec)
>  	popl KLEN
>  	popl KEYP
>  #endif
> +	FRAME_END
>  	ret
>  ENDPROC(aesni_dec)

Ditto.
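(To spell out where the breakage comes from: with CONFIG_FRAME_POINTER=y
I'd expect the <asm/frame.h> macros to boil down to something like the
sketch below on 32-bit -- the real macros presumably use width-agnostic
register names so they cover 64-bit as well:)

.macro FRAME_BEGIN
	pushl %ebp		# one extra 4-byte slot on the stack, so
	movl %esp, %ebp		# every %esp-relative offset below grows by 4
.endm

.macro FRAME_END
	popl %ebp
.endm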
> @@ -2292,6 +2299,7 @@ ENDPROC(_aesni_dec4)
>   *		     size_t len)
>   */
>  ENTRY(aesni_ecb_enc)
> +	FRAME_BEGIN
>  #ifndef __x86_64__
>  	pushl LEN
>  	pushl KEYP
> @@ -2342,6 +2350,7 @@ ENTRY(aesni_ecb_enc)
>  	popl KEYP
>  	popl LEN
>  #endif
> +	FRAME_END
>  	ret
>  ENDPROC(aesni_ecb_enc)

Ditto.

> @@ -2350,6 +2359,7 @@ ENDPROC(aesni_ecb_enc)
>   *		     size_t len);
>   */
>  ENTRY(aesni_ecb_dec)
> +	FRAME_BEGIN
>  #ifndef __x86_64__
>  	pushl LEN
>  	pushl KEYP
> @@ -2401,6 +2411,7 @@ ENTRY(aesni_ecb_dec)
>  	popl KEYP
>  	popl LEN
>  #endif
> +	FRAME_END
>  	ret
>  ENDPROC(aesni_ecb_dec)

Ditto.

> @@ -2409,6 +2420,7 @@ ENDPROC(aesni_ecb_dec)
>   *		     size_t len, u8 *iv)
>   */
>  ENTRY(aesni_cbc_enc)
> +	FRAME_BEGIN
>  #ifndef __x86_64__
>  	pushl IVP
>  	pushl LEN
> @@ -2443,6 +2455,7 @@ ENTRY(aesni_cbc_enc)
>  	popl LEN
>  	popl IVP
>  #endif
> +	FRAME_END
>  	ret
>  ENDPROC(aesni_cbc_enc)

Ditto.

> @@ -2451,6 +2464,7 @@ ENDPROC(aesni_cbc_enc)
>   *		     size_t len, u8 *iv)
>   */
>  ENTRY(aesni_cbc_dec)
> +	FRAME_BEGIN
>  #ifndef __x86_64__
>  	pushl IVP
>  	pushl LEN
> @@ -2534,6 +2548,7 @@ ENTRY(aesni_cbc_dec)
>  	popl LEN
>  	popl IVP
>  #endif
> +	FRAME_END
>  	ret
>  ENDPROC(aesni_cbc_dec)

Ditto.

> @@ -2598,6 +2613,7 @@ ENDPROC(_aesni_inc)
>   *		     size_t len, u8 *iv)
>   */
>  ENTRY(aesni_ctr_enc)
> +	FRAME_BEGIN
>  	cmp $16, LEN
>  	jb .Lctr_enc_just_ret
>  	mov 480(KEYP), KLEN
> @@ -2651,6 +2667,7 @@ ENTRY(aesni_ctr_enc)
>  .Lctr_enc_ret:
>  	movups IV, (IVP)
>  .Lctr_enc_just_ret:
> +	FRAME_END
>  	ret
>  ENDPROC(aesni_ctr_enc)
>
> @@ -2677,6 +2694,7 @@ ENDPROC(aesni_ctr_enc)
>   *			  bool enc, u8 *iv)
>   */
>  ENTRY(aesni_xts_crypt8)
> +	FRAME_BEGIN
>  	cmpb $0, %cl
>  	movl $0, %ecx
>  	movl $240, %r10d
> @@ -2777,6 +2795,7 @@ ENTRY(aesni_xts_crypt8)
>  	pxor INC, STATE4
>  	movdqu STATE4, 0x70(OUTP)
>
> +	FRAME_END
>  	ret
>  ENDPROC(aesni_xts_crypt8)
>

Regards,
Mathias
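P.S.: With such a macro, the fixed-up 32-bit prologue of e.g. aesni_enc
would then look something like this (just a sketch -- it assumes
FRAME_BEGIN does exactly one push, and that the three arguments sit at
12/16/20(%esp) after the return address and the two pushes above):

ENTRY(aesni_enc)
	FRAME_BEGIN
#ifndef __x86_64__
	pushl KEYP
	pushl KLEN
	movl FRAME_OFFSET(12)(%esp), KEYP	# ctx
	movl FRAME_OFFSET(16)(%esp), OUTP	# dst
	movl FRAME_OFFSET(20)(%esp), INP	# src
#endif
	...					# encryption body unchanged
#ifndef __x86_64__
	popl KLEN
	popl KEYP
#endif
	FRAME_END
	ret
ENDPROC(aesni_enc)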