In preparation for converting some __uaccess_begin() instances to
__uaccess_begin_nospec(), make sure all 'from user' uaccess paths are
using the _begin(), _end() helpers rather than open-coded stac() and
clac().
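
For reference, the helpers in play expand roughly as follows on x86 (a
sketch based on arch/x86/include/asm/uaccess.h; exact definitions vary
by tree):

	/* Open the user-access window: set EFLAGS.AC on SMAP hardware */
	#define __uaccess_begin()	stac()
	/* Close the user-access window again */
	#define __uaccess_end()		clac()

	/*
	 * The _nospec variant this prepares for additionally orders the
	 * access behind a speculation barrier, so a user pointer
	 * dereferenced under a mispredicted bounds check cannot be used
	 * to leak data:
	 */
	#define __uaccess_begin_nospec()	\
	({					\
		stac();				\
		barrier_nospec();	\
	})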

There are no functional changes in this patch.

Suggested-by: Ingo Molnar <mi...@redhat.com>
Cc: Tom Lendacky <thomas.lenda...@amd.com>
Cc: Al Viro <v...@zeniv.linux.org.uk>
Cc: Kees Cook <keesc...@chromium.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: "H. Peter Anvin" <h...@zytor.com>
Cc: x...@kernel.org
Signed-off-by: Dan Williams <dan.j.willi...@intel.com>
---
 arch/x86/lib/usercopy_32.c |    8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 1b377f734e64..de3436719e26 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -331,12 +331,12 @@ do {									\
 
 unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
 {
-       stac();
+       __uaccess_begin();
        if (movsl_is_ok(to, from, n))
                __copy_user(to, from, n);
        else
                n = __copy_user_intel(to, from, n);
-       clac();
+       __uaccess_end();
        return n;
 }
 EXPORT_SYMBOL(__copy_user_ll);
@@ -344,7 +344,7 @@ EXPORT_SYMBOL(__copy_user_ll);
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
					unsigned long n)
 {
-       stac();
+       __uaccess_begin();
 #ifdef CONFIG_X86_INTEL_USERCOPY
        if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
                n = __copy_user_intel_nocache(to, from, n);
@@ -353,7 +353,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
 #else
        __copy_user(to, from, n);
 #endif
-       clac();
+       __uaccess_end();
        return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
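
For illustration, after the planned conversion a 'from user' path would
take the form below (a sketch only; the follow-up patch decides which
call sites actually switch to the _nospec variant):

	unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
	{
		__uaccess_begin_nospec();	/* stac() plus speculation barrier */
		if (movsl_is_ok(to, from, n))
			__copy_user(to, from, n);
		else
			n = __copy_user_intel(to, from, n);
		__uaccess_end();
		return n;
	}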
