Current futex atomic operations are implemented with ll/sc instructions
and clearing PSTATE.PAN.

Since Armv9.6, FEAT_LSUI supplies not only load/store instructions but
also atomic operations for user memory access from the kernel, so the
kernel no longer needs to clear the PSTATE.PAN bit.

With these instructions, some of the futex atomic operations no longer
need to be implemented with an ldxr/stlxr pair; instead they can be
implemented with a single atomic operation supplied by FEAT_LSUI,
without needing to enable MTO as usage of the ldtr*/sttr* instructions
does.

However, some futex atomic operations don't have a matching
instruction (i.e. eor, or cmpxchg with word size).
For those operations, use cas{al}t to implement them.

Signed-off-by: Yeoreum Yun <[email protected]>
---
 arch/arm64/include/asm/futex.h | 166 ++++++++++++++++++++++++++++++++-
 1 file changed, 164 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 9a0efed50743..b579e9d0964d 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -9,6 +9,8 @@
 #include <linux/uaccess.h>
 #include <linux/stringify.h>
 
+#include <asm/alternative.h>
+#include <asm/alternative-macros.h>
 #include <asm/errno.h>
 
 #define FUTEX_MAX_LOOPS        128 /* What's the largest number you can think 
of? */
@@ -87,11 +89,171 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 
newval, u32 *oval)
        return ret;
 }
 
+#ifdef CONFIG_ARM64_LSUI
+
+#define __LSUI_PREAMBLE        ".arch_extension lsui\n"
+
/*
 * LSUI_FUTEX_ATOMIC_OP - generate a futex atomic op implemented with a
 * single FEAT_LSUI unprivileged atomic instruction on the user word.
 *
 * @op:     suffix of the generated __lsui_futex_atomic_##op() function
 * @asm_op: base mnemonic; the "al" appended below selects the
 *          acquire+release variant (e.g. ldtadd -> ldtaddal)
 *
 * Returns 0 with the previous value of *uaddr in *oval, or a non-zero
 * error placed in @ret by the exception-table fixup if the user access
 * faults.
 */
#define LSUI_FUTEX_ATOMIC_OP(op, asm_op)				\
static __always_inline int						\
__lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval)	\
{									\
	int ret = 0;							\
	int oldval;							\
									\
	/* NOTE(review): presumably needed for SW-PAN (TTBR0)		\
	 * configurations so the user mapping is visible — confirm */	\
	uaccess_ttbr0_enable();						\
									\
	asm volatile("// __lsui_futex_atomic_" #op "\n"			\
	__LSUI_PREAMBLE							\
"1:	" #asm_op "al	%w3, %w2, %1\n"					\
"2:\n"									\
	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)				\
	: "+r" (ret), "+Q" (*uaddr), "=r" (oldval)			\
	: "r" (oparg)							\
	: "memory");							\
									\
	uaccess_ttbr0_disable();					\
									\
	if (!ret)							\
		*oval = oldval;						\
	return ret;							\
}

/*
 * ldtadd/ldtset/ldtclr map directly onto add, or and andnot; "set"
 * (unconditional store, returning the old value) is a swpt exchange.
 */
LSUI_FUTEX_ATOMIC_OP(add, ldtadd)
LSUI_FUTEX_ATOMIC_OP(or, ldtset)
LSUI_FUTEX_ATOMIC_OP(andnot, ldtclr)
LSUI_FUTEX_ATOMIC_OP(set, swpt)
+
/*
 * 64-bit compare-and-exchange on user memory via the unprivileged
 * casalt instruction (FEAT_LSUI, acquire+release semantics).
 *
 * On return *oldval holds the value observed in *uaddr: unchanged when
 * the exchange succeeded, the conflicting value otherwise.  Returns 0,
 * or a non-zero error set by the exception-table fixup if the user
 * access faults.
 */
static __always_inline int
__lsui_cmpxchg64(u64 __user *uaddr, u64 *oldval, u64 newval)
{
	int ret = 0;

	uaccess_ttbr0_enable();

	asm volatile("// __lsui_cmpxchg64\n"
	__LSUI_PREAMBLE
"1:	casalt	%2, %3, %1\n"
"2:\n"
	_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
	: "+r" (ret), "+Q" (*uaddr), "+r" (*oldval)
	: "r" (newval)
	: "memory");

	uaccess_ttbr0_disable();

	return ret;
}
+
+static __always_inline int
+__lsui_cmpxchg32(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
+{
+       u64 __user *uaddr64;
+       bool futex_pos, other_pos;
+       int ret, i;
+       u32 other, orig_other;
+       union {
+               u32 futex[2];
+               u64 raw;
+       } oval64, orig64, nval64;
+
+       uaddr64 = (u64 __user *) PTR_ALIGN_DOWN(uaddr, sizeof(u64));
+       futex_pos = !IS_ALIGNED((unsigned long)uaddr, sizeof(u64));
+       other_pos = !futex_pos;
+
+       oval64.futex[futex_pos] = oldval;
+       ret = get_user(oval64.futex[other_pos], (u32 __user *)uaddr64 + 
other_pos);
+       if (ret)
+               return -EFAULT;
+
+       ret = -EAGAIN;
+       for (i = 0; i < FUTEX_MAX_LOOPS; i++) {
+               orig64.raw = nval64.raw = oval64.raw;
+
+               nval64.futex[futex_pos] = newval;
+
+               if (__lsui_cmpxchg64(uaddr64, &oval64.raw, nval64.raw))
+                       return -EFAULT;
+
+               oldval = oval64.futex[futex_pos];
+               other = oval64.futex[other_pos];
+               orig_other = orig64.futex[other_pos];
+
+               if (other == orig_other) {
+                       ret = 0;
+                       break;
+               }
+       }
+
+       if (!ret)
+               *oval = oldval;
+
+       return ret;
+}
+
/*
 * FEAT_LSUI provides ldtclr (and-not) but no plain "and" atomic:
 * implement AND by undoing the bitwise negation applied to @oparg by
 * arch_futex_atomic_op_inuser() for FUTEX_OP_ANDN, then using the
 * andnot primitive.
 */
static __always_inline int
__lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
{
	return __lsui_futex_atomic_andnot(~oparg, uaddr, oval);
}
+
/*
 * FEAT_LSUI has no ldteor/stteor, so implement the futex XOR op as a
 * cmpxchg loop: re-read the futex word and retry until the 32-bit
 * cmpxchg observes the value the xor was computed from.
 *
 * Returns 0 with the pre-xor value in *oval, -EFAULT on a faulting
 * user access, or -EAGAIN after FUTEX_MAX_LOOPS failed attempts
 * (including -EAGAIN propagated from __lsui_cmpxchg32() itself).
 */
static __always_inline int
__lsui_futex_atomic_eor(int oparg, u32 __user *uaddr, int *oval)
{
	u32 oldval, newval, val;
	int ret, i;

	/* Initial (non-atomic) snapshot of the futex word */
	if (get_user(oldval, uaddr))
		return -EFAULT;

	/*
	 * there are no ldteor/stteor instructions...
	 */
	for (i = 0; i < FUTEX_MAX_LOOPS; i++) {
		newval = oldval ^ oparg;

		ret = __lsui_cmpxchg32(uaddr, oldval, newval, &val);
		if (ret)
			return ret;

		/* cmpxchg succeeded iff the observed value matched */
		if (val == oldval) {
			*oval = val;
			return 0;
		}

		/* Lost a race: recompute the xor against the new value */
		oldval = val;
	}

	return -EAGAIN;
}
+
/*
 * Futex cmpxchg via the 64-bit casalt emulation — there is no
 * word-sized cas{al}t instruction to use directly.
 */
static __always_inline int
__lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
	return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
}
+
/*
 * Dispatch to the LSUI implementation when the ARM64_HAS_LSUI cpucap
 * is present (patched in via the alternatives framework), falling back
 * to the LL/SC variant otherwise.  Without CONFIG_ARM64_LSUI only the
 * LL/SC path is compiled in.
 */
#define __lsui_llsc_body(op, ...)					\
({									\
	alternative_has_cap_unlikely(ARM64_HAS_LSUI) ?			\
		__lsui_##op(__VA_ARGS__) : __llsc_##op(__VA_ARGS__);	\
})

#else	/* CONFIG_ARM64_LSUI */

#define __lsui_llsc_body(op, ...)	__llsc_##op(__VA_ARGS__)

#endif	/* CONFIG_ARM64_LSUI */
+
+
 #define FUTEX_ATOMIC_OP(op)                                            \
 static __always_inline int                                             \
 __futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval)           \
 {                                                                      \
-       return __llsc_futex_atomic_##op(oparg, uaddr, oval);            \
+       return __lsui_llsc_body(futex_atomic_##op, oparg, uaddr, oval); \
 }
 
 FUTEX_ATOMIC_OP(add)
@@ -103,7 +265,7 @@ FUTEX_ATOMIC_OP(set)
/* Arch entry point for futex cmpxchg: LSUI when available, LL/SC
 * otherwise. */
static __always_inline int
__futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
	return __lsui_llsc_body(futex_cmpxchg, uaddr, oldval, newval, oval);
}
 
 static inline int
-- 
LEVI:{C3F47F37-75D8-414A-A8BA-3980EC8A46D7}


Reply via email to