The barrier_nospec() in 32-bit __get_user() is slow. Instead use pointer masking to force the user pointer to all 1's if the access_ok() mispredicted true for an invalid address.
Signed-off-by: Josh Poimboeuf <jpoim...@kernel.org> --- arch/x86/lib/getuser.S | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 5bce27670baa..7da4fc75eba9 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -35,8 +35,6 @@ #include <asm/asm.h> #include <asm/smap.h> -#define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC - .macro mask_user_address size:req .if IS_ENABLED(CONFIG_X86_64) movq $0x0123456789abcdef,%rdx @@ -107,11 +105,7 @@ EXPORT_SYMBOL(__get_user_8) /* .. and the same for __get_user, just without the range checks */ SYM_FUNC_START(__get_user_nocheck_1) -#ifdef CONFIG_X86_64 mask_user_address size=1 -#else - ASM_BARRIER_NOSPEC -#endif ASM_STAC UACCESS movzbl (%_ASM_AX),%edx xor %eax,%eax @@ -121,11 +115,7 @@ SYM_FUNC_END(__get_user_nocheck_1) EXPORT_SYMBOL(__get_user_nocheck_1) SYM_FUNC_START(__get_user_nocheck_2) -#ifdef CONFIG_X86_64 mask_user_address size=2 -#else - ASM_BARRIER_NOSPEC -#endif ASM_STAC UACCESS movzwl (%_ASM_AX),%edx xor %eax,%eax @@ -135,11 +125,7 @@ SYM_FUNC_END(__get_user_nocheck_2) EXPORT_SYMBOL(__get_user_nocheck_2) SYM_FUNC_START(__get_user_nocheck_4) -#ifdef CONFIG_X86_64 mask_user_address size=4 -#else - ASM_BARRIER_NOSPEC -#endif ASM_STAC UACCESS movl (%_ASM_AX),%edx xor %eax,%eax @@ -149,11 +135,7 @@ SYM_FUNC_END(__get_user_nocheck_4) EXPORT_SYMBOL(__get_user_nocheck_4) SYM_FUNC_START(__get_user_nocheck_8) -#ifdef CONFIG_X86_64 mask_user_address size=8 -#else - ASM_BARRIER_NOSPEC -#endif ASM_STAC #ifdef CONFIG_X86_64 UACCESS movq (%_ASM_AX),%rdx -- 2.47.0