Le 17/01/2021 à 18:19, Christopher M. Riedl a écrit :
On Mon Jan 11, 2021 at 7:22 AM CST, Christophe Leroy wrote:


Le 09/01/2021 à 04:25, Christopher M. Riedl a écrit :
Implement raw_copy_from_user_allowed() which assumes that userspace read
access is open. Use this new function to implement raw_copy_from_user().
Finally, wrap the new function to follow the usual "unsafe_" convention
of taking a label argument.

I think there is no point implementing raw_copy_from_user_allowed(), see
https://github.com/linuxppc/linux/commit/4b842e4e25b1 and
https://patchwork.ozlabs.org/project/linuxppc-dev/patch/8c74fc9ce8131cabb10b3e95dc0e430f396ee83e.1610369143.git.christophe.le...@csgroup.eu/

You should simply do:

#define unsafe_copy_from_user(d, s, l, e) \
unsafe_op_wrap(__copy_tofrom_user((__force void __user *)d, s, l), e)


I gave this a try and the signal ops decreased by ~8K. Now, to be
honest, I am not sure what an "acceptable" benchmark number here
actually is - so maybe this is ok? Same loss with both radix and hash:

I don't think this is ok, but it probably means that you are using unsafe_copy_from_user() to copy small constant size data that should be copied with unsafe_get_user() instead.


        |                                      | hash   | radix  |
        | ------------------------------------ | ------ | ------ |
        | linuxppc/next                        | 118693 | 133296 |
        | linuxppc/next w/o KUAP+KUEP          | 228911 | 228654 |
        | unsafe-signal64                      | 200480 | 234067 |
        | unsafe-signal64 (__copy_tofrom_user) | 192467 | 225119 |

To put this into perspective, prior to KUAP and uaccess flush, signal
performance in this benchmark was ~290K on hash.


Christophe


The new raw_copy_from_user_allowed() calls non-inline __copy_tofrom_user()
internally. This is still safe to call inside user access blocks formed
with user_*_access_begin()/user_*_access_end() since asm functions are not
instrumented for tracing.

Signed-off-by: Christopher M. Riedl <c...@codefail.de>
---
   arch/powerpc/include/asm/uaccess.h | 28 +++++++++++++++++++---------
   1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 501c9a79038c..698f3a6d6ae5 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -403,38 +403,45 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
   }
   #endif /* __powerpc64__ */
-static inline unsigned long raw_copy_from_user(void *to,
-               const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_from_user_allowed(void *to, const void __user *from, unsigned long n)
   {
-       unsigned long ret;
        if (__builtin_constant_p(n) && (n <= 8)) {
-               ret = 1;
+               unsigned long ret = 1;
                switch (n) {
                case 1:
                        barrier_nospec();
-                       __get_user_size(*(u8 *)to, from, 1, ret);
+                       __get_user_size_allowed(*(u8 *)to, from, 1, ret);
                        break;
                case 2:
                        barrier_nospec();
-                       __get_user_size(*(u16 *)to, from, 2, ret);
+                       __get_user_size_allowed(*(u16 *)to, from, 2, ret);
                        break;
                case 4:
                        barrier_nospec();
-                       __get_user_size(*(u32 *)to, from, 4, ret);
+                       __get_user_size_allowed(*(u32 *)to, from, 4, ret);
                        break;
                case 8:
                        barrier_nospec();
-                       __get_user_size(*(u64 *)to, from, 8, ret);
+                       __get_user_size_allowed(*(u64 *)to, from, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }
+       return __copy_tofrom_user((__force void __user *)to, from, n);
+}
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       unsigned long ret;
+
        barrier_nospec();
        allow_read_from_user(from, n);
-       ret = __copy_tofrom_user((__force void __user *)to, from, n);
+       ret = raw_copy_from_user_allowed(to, from, n);
        prevent_read_from_user(from, n);
        return ret;
   }
@@ -542,6 +549,9 @@ user_write_access_begin(const void __user *ptr, size_t len)
   #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
   #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
+#define unsafe_copy_from_user(d, s, l, e) \
+       unsafe_op_wrap(raw_copy_from_user_allowed(d, s, l), e)
+
   #define unsafe_copy_to_user(d, s, l, e) \
   do {                                                                 \
        u8 __user *_dst = (u8 __user *)(d);                             \

Reply via email to