On Mon, Nov 23, 2020 at 01:49:07PM -0700, dann frazier wrote:
> On Mon, Nov 26, 2018 at 11:50:32AM +0100, Greg Kroah-Hartman wrote:
> > 4.4-stable review patch.  If anyone has any objections, please let me know.
> 
> FYI, I bisected a regression down to this commit. It causes an
> ADR_PREL_PG_HI21 relocation to be emitted in the sha{1,2}_ce modules.
> In 4.4, ADR_PREL_PG_HI21 relocations are rejected in modules when the
> kernel is built with CONFIG_ARM64_ERRATUM_843419=y, so the sha{1,2}_ce
> modules now fail to load:
> 
> [   37.866250] module sha1_ce: unsupported RELA relocation: 275
> 
> Looks like this should affect 4.14.y as well, but I haven't tested
> it yet.
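
The offending relocation is easy to confirm in a built module, e.g.
with something like this (module path assumes an in-tree build):

  $ aarch64-linux-gnu-readelf -r arch/arm64/crypto/sha1-ce.ko | grep ADR_PREL

ELF relocation type 275 is R_AARCH64_ADR_PREL_PG_HI21.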

This regression appears to be limited to 4.4.y. I didn't see it when
testing 4.9.y, and a second bisection showed why: 4.9.y and later also
carry a backport of commit 41c066f ("arm64: assembler: make adr_l work
in modules under KASLR"). That backport was dropped from 4.4.y because
it caused a build failure:

  https://www.spinics.net/lists/stable/msg179709.html
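
For context on the failure mode: 4.4's module loader only handles the
adrp relocation when the erratum workaround is disabled, so with
CONFIG_ARM64_ERRATUM_843419=y it falls through to the "unsupported RELA
relocation" error above. From memory (worth re-checking against the
4.4 tree), the relevant piece of apply_relocate_add() in
arch/arm64/kernel/module.c is:

  #ifndef CONFIG_ARM64_ERRATUM_843419
  	case R_AARCH64_ADR_PREL_PG_HI21_NC:
  		overflow_check = false;
  	case R_AARCH64_ADR_PREL_PG_HI21:
  		ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
  				     AARCH64_INSN_IMM_ADR);
  		break;
  #endif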

Shall I submit a revert of this patch for 4.4.y, or is it worth trying
to get a backport of 41c066f to work?

  -dann
  
> > From: Ard Biesheuvel <ard.biesheu...@linaro.org>
> > 
> > commit f4857f4c2ee9aa4e2aacac1a845352b00197fb57 upstream.
> > 
> > Replace the inline asm which exports struct offsets as ELF symbols
> > with proper const variables exposing the same values. This works
> > around an issue with Clang which does not interpret the "i" (or "I")
> > constraints in the same way as GCC.
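
[ Note: the removed ASM_EXPORT construct defines an assembler symbol
  from a C constant expression; a self-contained sketch of the pattern
  (struct/field names are placeholders):

      #include <stddef.h>
      struct foo { int bar; };
      void f(void)
      {
              /* GCC folds %0 to an assemble-time immediate here;
                 Clang does not always honour "I" that way. */
              asm(".globl bar_off; .set bar_off, %0"
                  :: "I"(offsetof(struct foo, bar)));
      }

  The patch replaces this with ordinary const u32 variables that the
  .S code loads at run time via ldr_l. ]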
> > 
> > Signed-off-by: Ard Biesheuvel <ard.biesheu...@linaro.org>
> > Tested-by: Matthias Kaehlcke <m...@chromium.org>
> > Signed-off-by: Herbert Xu <herb...@gondor.apana.org.au>
> > Signed-off-by: Nathan Chancellor <natechancel...@gmail.com>
> > Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
> > ---
> >  arch/arm64/crypto/sha1-ce-core.S |    6 ++++--
> >  arch/arm64/crypto/sha1-ce-glue.c |   11 +++--------
> >  arch/arm64/crypto/sha2-ce-core.S |    6 ++++--
> >  arch/arm64/crypto/sha2-ce-glue.c |   13 +++++--------
> >  4 files changed, 16 insertions(+), 20 deletions(-)
> > 
> > --- a/arch/arm64/crypto/sha1-ce-core.S
> > +++ b/arch/arm64/crypto/sha1-ce-core.S
> > @@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform)
> >     ldr             dgb, [x0, #16]
> >  
> >     /* load sha1_ce_state::finalize */
> > -   ldr             w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
> > +   ldr_l           w4, sha1_ce_offsetof_finalize, x4
> > +   ldr             w4, [x0, x4]
> >  
> >     /* load input */
> >  0: ld1             {v8.4s-v11.4s}, [x1], #64
> > @@ -132,7 +133,8 @@ CPU_LE( rev32           v11.16b, v11.16b        )
> >      * the padding is handled by the C code in that case.
> >      */
> >     cbz             x4, 3f
> > -   ldr             x4, [x0, #:lo12:sha1_ce_offsetof_count]
> > +   ldr_l           w4, sha1_ce_offsetof_count, x4
> > +   ldr             x4, [x0, x4]
> >     movi            v9.2d, #0
> >     mov             x8, #0x80000000
> >     movi            v10.2d, #0
> > --- a/arch/arm64/crypto/sha1-ce-glue.c
> > +++ b/arch/arm64/crypto/sha1-ce-glue.c
> > @@ -17,9 +17,6 @@
> >  #include <linux/crypto.h>
> >  #include <linux/module.h>
> >  
> > -#define ASM_EXPORT(sym, val) \
> > -   asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
> > -
> >  MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
> >  MODULE_AUTHOR("Ard Biesheuvel <ard.biesheu...@linaro.org>");
> >  MODULE_LICENSE("GPL v2");
> > @@ -32,6 +29,9 @@ struct sha1_ce_state {
> >  asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> >                               int blocks);
> >  
> > +const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
> > +const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
> > +
> >  static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
> >                       unsigned int len)
> >  {
> > @@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_de
> >     struct sha1_ce_state *sctx = shash_desc_ctx(desc);
> >     bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
> >  
> > -   ASM_EXPORT(sha1_ce_offsetof_count,
> > -              offsetof(struct sha1_ce_state, sst.count));
> > -   ASM_EXPORT(sha1_ce_offsetof_finalize,
> > -              offsetof(struct sha1_ce_state, finalize));
> > -
> >     /*
> >      * Allow the asm code to perform the finalization if there is no
> >      * partial data and the input is a round multiple of the block size.
> > --- a/arch/arm64/crypto/sha2-ce-core.S
> > +++ b/arch/arm64/crypto/sha2-ce-core.S
> > @@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform)
> >     ld1             {dgav.4s, dgbv.4s}, [x0]
> >  
> >     /* load sha256_ce_state::finalize */
> > -   ldr             w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
> > +   ldr_l           w4, sha256_ce_offsetof_finalize, x4
> > +   ldr             w4, [x0, x4]
> >  
> >     /* load input */
> >  0: ld1             {v16.4s-v19.4s}, [x1], #64
> > @@ -136,7 +137,8 @@ CPU_LE( rev32           v19.16b, v19.16b        )
> >      * the padding is handled by the C code in that case.
> >      */
> >     cbz             x4, 3f
> > -   ldr             x4, [x0, #:lo12:sha256_ce_offsetof_count]
> > +   ldr_l           w4, sha256_ce_offsetof_count, x4
> > +   ldr             x4, [x0, x4]
> >     movi            v17.2d, #0
> >     mov             x8, #0x80000000
> >     movi            v18.2d, #0
> > --- a/arch/arm64/crypto/sha2-ce-glue.c
> > +++ b/arch/arm64/crypto/sha2-ce-glue.c
> > @@ -17,9 +17,6 @@
> >  #include <linux/crypto.h>
> >  #include <linux/module.h>
> >  
> > -#define ASM_EXPORT(sym, val) \
> > -   asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
> > -
> >  MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto 
> > Extensions");
> >  MODULE_AUTHOR("Ard Biesheuvel <ard.biesheu...@linaro.org>");
> >  MODULE_LICENSE("GPL v2");
> > @@ -32,6 +29,11 @@ struct sha256_ce_state {
> >  asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
> >                               int blocks);
> >  
> > +const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
> > +                                         sst.count);
> > +const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
> > +                                            finalize);
> > +
> >  static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
> >                         unsigned int len)
> >  {
> > @@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_
> >     struct sha256_ce_state *sctx = shash_desc_ctx(desc);
> >     bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
> >  
> > -   ASM_EXPORT(sha256_ce_offsetof_count,
> > -              offsetof(struct sha256_ce_state, sst.count));
> > -   ASM_EXPORT(sha256_ce_offsetof_finalize,
> > -              offsetof(struct sha256_ce_state, finalize));
> > -
> >     /*
> >      * Allow the asm code to perform the finalization if there is no
> >      * partial data and the input is a round multiple of the block size.
> > 
> > 
