For sizes that are known and fixed at compile time, teach x86-32
copy_to_user() to convert them into the simpler put_user() form and
inline them, mirroring the optimisation already applied to
copy_from_user() and used by x86-64.
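
As an illustrative sketch (a hypothetical caller; "ptr" is assumed to
be a u32 __user pointer), a fixed-size copy such as the one below can
then be inlined down to the single 32-bit __put_user_asm() case instead
of a call out to __copy_user_ll():

	u32 value = 42;

	/*
	 * sizeof(value) == 4 is a compile-time constant, so once
	 * copy_to_user() is inlined down to raw_copy_to_user() the
	 * new switch selects the single 32-bit store.  On failure,
	 * copy_to_user() returns the number of bytes not copied.
	 */
	if (copy_to_user(ptr, &value, sizeof(value)))
		return -EFAULT;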

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: "H. Peter Anvin" <h...@zytor.com>
---
 arch/x86/include/asm/uaccess_32.h | 48 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 44d17d1ab07c..a02aa9db34ed 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -16,6 +16,54 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
 static __always_inline unsigned long __must_check
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       if (__builtin_constant_p(n)) {
+               unsigned long ret = 0;
+
+               switch (n) {
+               case 1:
+                       __uaccess_begin();
+                       __put_user_asm(*(u8 *)from, to, ret,
+                                       "b", "b", "iq", 1);
+                       __uaccess_end();
+                       return ret;
+               case 2:
+                       __uaccess_begin();
+                       __put_user_asm(*(u16 *)from, to, ret,
+                                       "w", "w", "ir", 2);
+                       __uaccess_end();
+                       return ret;
+               case 4:
+                       __uaccess_begin();
+                       __put_user_asm(*(u32 *)from, to, ret,
+                                       "l", "k", "ir", 4);
+                       __uaccess_end();
+                       return ret;
+               case 6:
+                       __uaccess_begin();
+                       __put_user_asm(*(u32 *)from, to, ret,
+                                       "l", "k", "ir", 4);
+                       if (likely(!ret)) {
+                               asm("":::"memory");
+                               __put_user_asm(*(u16 *)(4 + (char *)from),
+                                               (u16 __user *)(4 + (char __user *)to),
+                                               ret, "w", "w", "ir", 2);
+                       }
+                       __uaccess_end();
+                       return ret;
+               case 8:
+                       __uaccess_begin();
+                       __put_user_asm(*(u32 *)from, to, ret,
+                                       "l", "k", "ir", 4);
+                       if (likely(!ret)) {
+                               asm("":::"memory");
+                               __put_user_asm(*(u32 *)(4 + (char *)from),
+                                               (u32 __user *)(4 + (char __user *)to),
+                                               ret, "l", "k", "ir", 4);
+                       }
+                       __uaccess_end();
+                       return ret;
+               }
+       }
        return __copy_user_ll((__force void *)to, from, n);
 }
 
-- 
2.11.0
