On Tue, Sep 22, 2015 at 10:47:04AM -0500, Josh Poimboeuf wrote:
> aesni-intel_asm.S has several callable non-leaf functions which don't
> honor CONFIG_FRAME_POINTER, which can result in bad stack traces.
> 
> Create stack frames for them when CONFIG_FRAME_POINTER is enabled.
> 
> Signed-off-by: Josh Poimboeuf <jpoim...@redhat.com>
> Cc: Herbert Xu <herb...@gondor.apana.org.au>
> Cc: David S. Miller <da...@davemloft.net>
> ---
>  arch/x86/crypto/aesni-intel_asm.S | 19 +++++++++++++++++++
>  1 file changed, 19 insertions(+)
> 
> diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
> index 6bd2c6c..289ef12 100644
> --- a/arch/x86/crypto/aesni-intel_asm.S
> +++ b/arch/x86/crypto/aesni-intel_asm.S
> @@ -31,6 +31,7 @@
>  
>  #include <linux/linkage.h>
>  #include <asm/inst.h>
> +#include <asm/frame.h>
>  
>  /*
>   * The following macros are used to move an (un)aligned 16 byte value to/from
> @@ -1800,6 +1801,7 @@ ENDPROC(_key_expansion_256b)
>   *                   unsigned int key_len)
>   */

>  ENTRY(aesni_set_key)
> +     FRAME_BEGIN
>  #ifndef __x86_64__
>       pushl KEYP
>       movl 8(%esp), KEYP              # ctx

This will break 32-bit builds using the aesni-intel.ko module. You need
to adjust the %esp-based offsets for the non-x86_64 case, as FRAME_BEGIN
may push an additional word onto the stack.
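
For context: with CONFIG_FRAME_POINTER=y, FRAME_BEGIN presumably expands
to the usual frame-pointer prologue, on 32-bit something like (just a
sketch, the real definition is whatever lands in <asm/frame.h>):

.macro FRAME_BEGIN
	push %ebp		# one extra word on the stack
	mov %esp, %ebp
.endm

So the ctx pointer the quoted code loads from 8(%esp) actually ends up
at 12(%esp), and the same shift applies to the other stack-argument
loads in the 32-bit paths.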

How about adding a FRAME_OFFSET() macro to <asm/frame.h> to wrap the
offsets? Something like:

#ifdef CONFIG_FRAME_POINTER
/* FRAME_BEGIN pushes one extra word, account for it in the offsets */
# define FRAME_OFFSET(x)        ((x) + (BITS_PER_LONG / 8))
#else
# define FRAME_OFFSET(x)        (x)
#endif

And using it like this:

        movl FRAME_OFFSET(8)(%esp), KEYP                # ctx
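
E.g. for the aesni_set_key hunk quoted above, the 32-bit prologue would
then read roughly like this (only a sketch; the remaining %esp-based
argument loads in that block would get the same wrapping):

ENTRY(aesni_set_key)
	FRAME_BEGIN
#ifndef __x86_64__
	pushl KEYP
	movl FRAME_OFFSET(8)(%esp), KEYP	# ctx

That keeps the offsets correct whether or not FRAME_BEGIN pushed %ebp.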

> @@ -1905,6 +1907,7 @@ ENTRY(aesni_set_key)
>  #ifndef __x86_64__
>       popl KEYP
>  #endif
> +     FRAME_END
>       ret
>  ENDPROC(aesni_set_key)
>  
> @@ -1912,6 +1915,7 @@ ENDPROC(aesni_set_key)
>   * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
>   */
>  ENTRY(aesni_enc)
> +     FRAME_BEGIN
>  #ifndef __x86_64__
>       pushl KEYP
>       pushl KLEN
> @@ -1927,6 +1931,7 @@ ENTRY(aesni_enc)
>       popl KLEN
>       popl KEYP
>  #endif
> +     FRAME_END
>       ret
>  ENDPROC(aesni_enc)

Here, too.

>  
> @@ -2101,6 +2106,7 @@ ENDPROC(_aesni_enc4)
>   * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
>   */
>  ENTRY(aesni_dec)
> +     FRAME_BEGIN
>  #ifndef __x86_64__
>       pushl KEYP
>       pushl KLEN
> @@ -2117,6 +2123,7 @@ ENTRY(aesni_dec)
>       popl KLEN
>       popl KEYP
>  #endif
> +     FRAME_END
>       ret
>  ENDPROC(aesni_dec)

Ditto.

>  
> @@ -2292,6 +2299,7 @@ ENDPROC(_aesni_dec4)
>   *                 size_t len)
>   */
>  ENTRY(aesni_ecb_enc)
> +     FRAME_BEGIN
>  #ifndef __x86_64__
>       pushl LEN
>       pushl KEYP
> @@ -2342,6 +2350,7 @@ ENTRY(aesni_ecb_enc)
>       popl KEYP
>       popl LEN
>  #endif
> +     FRAME_END
>       ret
>  ENDPROC(aesni_ecb_enc)

Ditto.

>  
> @@ -2350,6 +2359,7 @@ ENDPROC(aesni_ecb_enc)
>   *                 size_t len);
>   */
>  ENTRY(aesni_ecb_dec)
> +     FRAME_BEGIN
>  #ifndef __x86_64__
>       pushl LEN
>       pushl KEYP
> @@ -2401,6 +2411,7 @@ ENTRY(aesni_ecb_dec)
>       popl KEYP
>       popl LEN
>  #endif
> +     FRAME_END
>       ret
>  ENDPROC(aesni_ecb_dec)

Ditto.

>  
> @@ -2409,6 +2420,7 @@ ENDPROC(aesni_ecb_dec)
>   *                 size_t len, u8 *iv)
>   */
>  ENTRY(aesni_cbc_enc)
> +     FRAME_BEGIN
>  #ifndef __x86_64__
>       pushl IVP
>       pushl LEN
> @@ -2443,6 +2455,7 @@ ENTRY(aesni_cbc_enc)
>       popl LEN
>       popl IVP
>  #endif
> +     FRAME_END
>       ret
>  ENDPROC(aesni_cbc_enc)

Ditto.

>  
> @@ -2451,6 +2464,7 @@ ENDPROC(aesni_cbc_enc)
>   *                 size_t len, u8 *iv)
>   */
>  ENTRY(aesni_cbc_dec)
> +     FRAME_BEGIN
>  #ifndef __x86_64__
>       pushl IVP
>       pushl LEN
> @@ -2534,6 +2548,7 @@ ENTRY(aesni_cbc_dec)
>       popl LEN
>       popl IVP
>  #endif
> +     FRAME_END
>       ret
>  ENDPROC(aesni_cbc_dec)

Ditto.

>  
> @@ -2598,6 +2613,7 @@ ENDPROC(_aesni_inc)
>   *                 size_t len, u8 *iv)
>   */
>  ENTRY(aesni_ctr_enc)
> +     FRAME_BEGIN
>       cmp $16, LEN
>       jb .Lctr_enc_just_ret
>       mov 480(KEYP), KLEN
> @@ -2651,6 +2667,7 @@ ENTRY(aesni_ctr_enc)
>  .Lctr_enc_ret:
>       movups IV, (IVP)
>  .Lctr_enc_just_ret:
> +     FRAME_END
>       ret
>  ENDPROC(aesni_ctr_enc)
>  
> @@ -2677,6 +2694,7 @@ ENDPROC(aesni_ctr_enc)
>   *                    bool enc, u8 *iv)
>   */
>  ENTRY(aesni_xts_crypt8)
> +     FRAME_BEGIN
>       cmpb $0, %cl
>       movl $0, %ecx
>       movl $240, %r10d
> @@ -2777,6 +2795,7 @@ ENTRY(aesni_xts_crypt8)
>       pxor INC, STATE4
>       movdqu STATE4, 0x70(OUTP)
>  
> +     FRAME_END
>       ret
>  ENDPROC(aesni_xts_crypt8)
>  

Regards,
Mathias