On Tue, May 12, 2015 at 11:53:20PM +0200, Borislav Petkov wrote:
> > That said, I think you should uninline those things, and move them
> > from a header file to a C file (arch/x86/lib/uaccess.c?).

It is starting to look better wrt size:

x86_64_defconfig:

                   text    data     bss     dec     hex filename
before:         12375798        1812800 1085440 15274038         e91036 vmlinux
after:          12269658        1812800 1085440 15167898         e7719a vmlinux

Now we CALL _copy_*_user which does CALL the optimal alternative
version. Advantage is that we're saving some space and alternatives
application for copy_user* is being done in fewer places, i.e.
arch/x86/lib/uaccess_64.c. If I move all copy_user_generic() callers
there, it would be the only compilation unit where the alternatives will
be done.

The disadvantage is that we have CALL after CALL and I wanted to have a
single CALL directly to the optimal copy_user function. That'll cost us
space, though, and more alternatives sites to patch during boot...

Thoughts?

Here's the first step only:

---
 arch/x86/include/asm/uaccess.h    |  7 ++-----
 arch/x86/include/asm/uaccess_64.h | 44 ++++-----------------------------------
 arch/x86/lib/Makefile             |  2 +-
 arch/x86/lib/uaccess_64.c         | 42 +++++++++++++++++++++++++++++++++++++
 4 files changed, 49 insertions(+), 46 deletions(-)
 create mode 100644 arch/x86/lib/uaccess_64.c

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 098f3fd5cc75..bdd5234fe9a3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -642,11 +642,8 @@ extern struct movsl_mask {
 # include <asm/uaccess_64.h>
 #endif
 
-extern __always_inline __must_check
-int _copy_from_user(void *dst, const void __user *src, unsigned size);
-
-extern __always_inline __must_check
-int _copy_to_user(void __user *dst, const void *src, unsigned size);
+extern __must_check int _copy_from_user(void *dst, const void __user *src, unsigned size);
+extern __must_check int _copy_to_user(void __user *dst, const void *src, unsigned size);
 
 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 # define copy_user_diag __compiletime_error
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 1aebc658acf9..440ba6b91c86 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -23,31 +23,11 @@ copy_user_generic_string(void *to, const void *from, unsigned len);
 __must_check unsigned long
 copy_user_generic_unrolled(void *to, const void *from, unsigned len);
 
-static __always_inline __must_check unsigned long
-copy_user_generic(void *to, const void *from, unsigned len)
-{
-       unsigned ret;
-
-       /*
-        * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
-        * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
-        * Otherwise, use copy_user_generic_unrolled.
-        */
-       alternative_call_2(copy_user_generic_unrolled,
-                        copy_user_generic_string,
-                        X86_FEATURE_REP_GOOD,
-                        copy_user_enhanced_fast_string,
-                        X86_FEATURE_ERMS,
-                        ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
-                                    "=d" (len)),
-                        "1" (to), "2" (from), "3" (len)
-                        : "memory", "rcx", "r8", "r9", "r10", "r11");
-       return ret;
-}
-
 __must_check unsigned long
 copy_in_user(void __user *to, const void __user *from, unsigned len);
 
+extern __must_check unsigned long copy_user_generic(void *to, const void *from, unsigned len);
+
 static __always_inline __must_check
 int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
@@ -98,16 +78,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
        return __copy_from_user_nocheck(dst, src, size);
 }
 
-static __always_inline __must_check
-int _copy_from_user(void *dst, const void __user *src, unsigned size)
-{
-       if (!access_ok(VERIFY_READ, src, size)) {
-               memset(dst, 0, size);
-               return 0;
-       }
-
-       return copy_user_generic(dst, src, size);
-}
+extern __must_check int _copy_from_user(void *dst, const void __user *src, unsigned size);
 
 static __always_inline __must_check
 int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
@@ -159,14 +130,7 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
        return __copy_to_user_nocheck(dst, src, size);
 }
 
-static __always_inline __must_check
-int _copy_to_user(void __user *dst, const void *src, unsigned size)
-{
-       if (!access_ok(VERIFY_WRITE, dst, size))
-               return size;
-
-       return copy_user_generic(dst, src, size);
-}
+__must_check int _copy_to_user(void __user *dst, const void *src, unsigned size);
 
 static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 982989d282ff..1885cc5eee32 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -40,6 +40,6 @@ else
         lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
         lib-y += clear_page_64.o copy_page_64.o
         lib-y += memmove_64.o memset_64.o
-        lib-y += copy_user_64.o
+        lib-y += copy_user_64.o uaccess_64.o
        lib-y += cmpxchg16b_emu.o
 endif
diff --git a/arch/x86/lib/uaccess_64.c b/arch/x86/lib/uaccess_64.c
new file mode 100644
index 000000000000..6cd15d874ab4
--- /dev/null
+++ b/arch/x86/lib/uaccess_64.c
@@ -0,0 +1,42 @@
+#include <asm/uaccess.h>
+
+__always_inline __must_check unsigned long
+copy_user_generic(void *to, const void *from, unsigned len)
+{
+       unsigned ret;
+
+       /*
+        * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
+        * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
+        * Otherwise, use copy_user_generic_unrolled.
+        */
+       alternative_call_2(copy_user_generic_unrolled,
+                        copy_user_generic_string,
+                        X86_FEATURE_REP_GOOD,
+                        copy_user_enhanced_fast_string,
+                        X86_FEATURE_ERMS,
+                        ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
+                                    "=d" (len)),
+                        "1" (to), "2" (from), "3" (len)
+                        : "memory", "rcx", "r8", "r9", "r10", "r11");
+       return ret;
+}
+EXPORT_SYMBOL_GPL(copy_user_generic);
+
+__must_check int _copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+       if (!access_ok(VERIFY_READ, src, size)) {
+               memset(dst, 0, size);
+               return 0;
+       }
+
+       return copy_user_generic(dst, src, size);
+}
+
+__must_check int _copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+       if (!access_ok(VERIFY_WRITE, dst, size))
+               return size;
+
+       return copy_user_generic(dst, src, size);
+}
-- 
2.3.5


-- 
Regards/Gruss,
    Boris.

ECO tip #101: Trim your mails when you reply.
--
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to