On Sun, Dec 14, 2025 at 11:22:45AM +0000, Yeoreum Yun wrote:
> Current futex atomic operations are implemented with ll/sc instructions
> and by clearing PSTATE.PAN.
>
> Since Armv9.6, FEAT_LSUI supplies not only load/store instructions but
> also atomic operations for user memory access from the kernel, so it is
> no longer necessary to clear the PSTATE.PAN bit.
>
> With these instructions, some futex atomic operations no longer need to
> be implemented with an ldxr/stlxr pair and can instead be implemented
> with a single atomic operation supplied by FEAT_LSUI.
>
> However, some futex atomic operations have no matching instruction
> (i.e. eor, or cmpxchg with word size). For those operations, use
> cas{al}t to implement them.
>
> Signed-off-by: Yeoreum Yun <[email protected]>
> ---
> arch/arm64/include/asm/futex.h | 180 ++++++++++++++++++++++++++++++++-
> 1 file changed, 178 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
> index f8cb674bdb3f..6778ff7e1c0e 100644
> --- a/arch/arm64/include/asm/futex.h
> +++ b/arch/arm64/include/asm/futex.h
> @@ -9,6 +9,8 @@
> #include <linux/uaccess.h>
> #include <linux/stringify.h>
>
> +#include <asm/alternative.h>
> +#include <asm/alternative-macros.h>
> #include <asm/errno.h>
>
> #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */
> @@ -86,11 +88,185 @@ __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> return ret;
> }
>
> +#ifdef CONFIG_AS_HAS_LSUI
> +
> +/*
> + * When the LSUI feature is present, the CPU also implements PAN, because
> + * FEAT_PAN has been mandatory since Armv8.1. Therefore, there is no need to
> + * call uaccess_ttbr0_enable()/uaccess_ttbr0_disable() around each LSUI
> + * operation.
> + */
I'd prefer not to rely on these sorts of properties because:
- CPU bugs happen all the time
- Virtualisation and idreg overrides mean illegal feature combinations
can show up
- The architects sometimes change their mind
So let's either drop the assumption that we have PAN if LSUI *or* actually
test that someplace during feature initialisation.
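If we go the feature-initialisation route, here's a completely untested
sketch of what the capability match could look like (has_lsui() and the
detection ordering are illustrative only):

	static bool has_lsui(const struct arm64_cpu_capabilities *entry,
			     int scope)
	{
		/*
		 * Refuse to advertise LSUI on any CPU that doesn't also
		 * have PAN, so that CPU errata, idreg overrides and odd
		 * virtual environments can't hand us LSUI without the
		 * PAN behaviour the futex code relies on.
		 */
		if (!has_cpuid_feature(entry, scope))
			return false;

		return cpus_have_cap(ARM64_HAS_PAN);
	}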
> +
> +#define __LSUI_PREAMBLE ".arch_extension lsui\n"
> +
> +#define LSUI_FUTEX_ATOMIC_OP(op, asm_op, mb) \
> +static __always_inline int \
> +__lsui_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \
> +{ \
> + int ret = 0; \
> + int oldval; \
> + \
> + asm volatile("// __lsui_futex_atomic_" #op "\n" \
> + __LSUI_PREAMBLE \
> +"1: " #asm_op #mb " %w3, %w2, %1\n" \
What's the point in separating the barrier suffix from the rest of the
instruction mnemonic? All the callers use -AL.
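i.e. drop the mb argument and spell the mnemonic out at each caller:

	LSUI_FUTEX_ATOMIC_OP(add, ldtaddal)
	LSUI_FUTEX_ATOMIC_OP(or, ldtsetal)
	LSUI_FUTEX_ATOMIC_OP(andnot, ldtclral)
	LSUI_FUTEX_ATOMIC_OP(set, swptal)

with the asm template then just being:

	"1:	" #asm_op "	%w3, %w2, %1\n"				\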
> +"2:\n"
> \
> + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
> + : "+r" (ret), "+Q" (*uaddr), "=r" (oldval) \
> + : "r" (oparg) \
> + : "memory"); \
> + \
> + if (!ret) \
> + *oval = oldval; \
> + \
> + return ret; \
> +}
> +
> +LSUI_FUTEX_ATOMIC_OP(add, ldtadd, al)
> +LSUI_FUTEX_ATOMIC_OP(or, ldtset, al)
> +LSUI_FUTEX_ATOMIC_OP(andnot, ldtclr, al)
> +LSUI_FUTEX_ATOMIC_OP(set, swpt, al)
> +
> +static __always_inline int
> +__lsui_cmpxchg64(u64 __user *uaddr, u64 *oldval, u64 newval)
> +{
> + int ret = 0;
> +
> + asm volatile("// __lsui_cmpxchg64\n"
> + __LSUI_PREAMBLE
> +"1: casalt %x2, %x3, %1\n"
How bizarre, they changed the order of the AL and T compared to SWPTAL.
Fair enough...
Also, I don't think you need the 'x' prefix on the 64-bit variables.
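i.e. the plain operand templates should give you the x registers already,
since everything there is 64-bit:

	"1:	casalt	%2, %3, %1\n"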
> +"2:\n"
> + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0)
> + : "+r" (ret), "+Q" (*uaddr), "+r" (*oldval)
> + : "r" (newval)
> + : "memory");
Don't you need to update *oldval here if the CAS didn't fault?
> +
> + return ret;
> +}
> +
> +static __always_inline int
> +__lsui_cmpxchg32(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> +{
> + u64 __user *uaddr64;
> + bool futex_on_lo;
> + int ret = -EAGAIN, i;
> + u32 other, orig_other;
> + union {
> + struct futex_on_lo {
> + u32 val;
> + u32 other;
> + } lo_futex;
> +
> + struct futex_on_hi {
> + u32 other;
> + u32 val;
> + } hi_futex;
> +
> + u64 raw;
> + } oval64, orig64, nval64;
> +
> + uaddr64 = (u64 __user *) PTR_ALIGN_DOWN(uaddr, sizeof(u64));
> + futex_on_lo = (IS_ALIGNED((unsigned long)uaddr, sizeof(u64)) ==
> + IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN));
Just make LSUI depend on !CPU_BIG_ENDIAN in Kconfig. The latter already
depends on BROKEN and so we'll probably drop it soon anyway. There's
certainly no need to care about it for new features and it should simplify
the code you have here if you can assume little-endian.
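Assuming little-endian only (i.e. with LSUI depending on !CPU_BIG_ENDIAN
in Kconfig), the check collapses to something like:

	/* On little-endian, a naturally-aligned futex sits in the low half. */
	futex_on_lo = IS_ALIGNED((unsigned long)uaddr, sizeof(u64));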
> +
> + for (i = 0; i < FUTEX_MAX_LOOPS; i++) {
> + if (get_user(oval64.raw, uaddr64))
> + return -EFAULT;
Since oldval is passed to us as an argument, can we get away with a
32-bit get_user() here?
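Untested sketch (other_addr is just for illustration): only the adjacent
word needs to be loaded, since oldval is already known:

	u32 __user *other_addr = (u32 __user *)uaddr64 + futex_on_lo;

	/* Read just the neighbouring 32-bit half of the u64. */
	if (get_user(orig_other, other_addr))
		return -EFAULT;

	if (futex_on_lo) {
		oval64.lo_futex.val = oldval;
		oval64.lo_futex.other = orig_other;
	} else {
		oval64.hi_futex.val = oldval;
		oval64.hi_futex.other = orig_other;
	}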
> +
> + nval64.raw = oval64.raw;
> +
> + if (futex_on_lo) {
> + oval64.lo_futex.val = oldval;
> + nval64.lo_futex.val = newval;
> + } else {
> + oval64.hi_futex.val = oldval;
> + nval64.hi_futex.val = newval;
> + }
> +
> + orig64.raw = oval64.raw;
> +
> + if (__lsui_cmpxchg64(uaddr64, &oval64.raw, nval64.raw))
> + return -EFAULT;
> +
> + if (futex_on_lo) {
> + oldval = oval64.lo_futex.val;
> + other = oval64.lo_futex.other;
> + orig_other = orig64.lo_futex.other;
> + } else {
> + oldval = oval64.hi_futex.val;
> + other = oval64.hi_futex.other;
> + orig_other = orig64.hi_futex.other;
> + }
> +
> + if (other == orig_other) {
> + ret = 0;
> + break;
> + }
> + }
> +
> + if (!ret)
> + *oval = oldval;
Shouldn't we set *oval to the value we got back from the CAS?
> +
> + return ret;
> +}
> +
> +static __always_inline int
> +__lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
> +{
> + return __lsui_futex_atomic_andnot(~oparg, uaddr, oval);
Please add a comment about the bitwise negation of oparg here, as we're
undoing the one from the caller.
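e.g. something like:

	static __always_inline int
	__lsui_futex_atomic_and(int oparg, u32 __user *uaddr, int *oval)
	{
		/*
		 * The FUTEX_OP_ANDN caller has already inverted oparg so
		 * that a plain AND gives and-not semantics; LDTCLR is a
		 * native and-not, so invert it back here.
		 */
		return __lsui_futex_atomic_andnot(~oparg, uaddr, oval);
	}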
> +}
> +
> +static __always_inline int
> +__lsui_futex_atomic_eor(int oparg, u32 __user *uaddr, int *oval)
> +{
> + u32 oldval, newval, val;
> + int ret, i;
> +
> + /*
> + * there are no ldteor/stteor instructions...
> + */
> + for (i = 0; i < FUTEX_MAX_LOOPS; i++) {
> + if (get_user(oldval, uaddr))
> + return -EFAULT;
> +
> + newval = oldval ^ oparg;
> +
> + ret = __lsui_cmpxchg32(uaddr, oldval, newval, &val);
> + if (ret)
> + return ret;
> +
> + if (val == oldval) {
> + *oval = val;
> + return 0;
> + }
> + }
> +
> + return -EAGAIN;
> +}
> +
> +static __always_inline int
> +__lsui_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
> +{
> + return __lsui_cmpxchg32(uaddr, oldval, newval, oval);
> +}
> +
> +#define __lsui_llsc_body(op, ...) \
> +({ \
> + alternative_has_cap_likely(ARM64_HAS_LSUI) ? \
This doesn't seem like it should be the "likely" case just yet?
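i.e. (assuming the rest of the macro follows the patch):

	#define __lsui_llsc_body(op, ...)				\
	({								\
		alternative_has_cap_unlikely(ARM64_HAS_LSUI) ?		\
			__lsui_##op(__VA_ARGS__) :			\
			__llsc_##op(__VA_ARGS__);			\
	})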
Will