Linus,

Please pull the latest x86-urgent-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86-urgent-for-linus

   # HEAD: 5b77e95dd7790ff6c8fbf1cd8d0104ebed818a03 x86/asm: Use stricter assembly constraints in bitops

Fix typos in user-visible resctrl parameters, and also fix assembly 
constraint bugs that might result in miscompilation.
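
( Side note, not part of the changelog: the constraint problem is that the
  old bitops described a bts/btr/btc purely through a
  "+m" (*(volatile long *)addr) operand, which tells the compiler that only
  the first long at 'addr' may change, while a register bit number can
  address any word of the bit array. Below is a minimal stand-alone sketch
  of the new convention - the address passed as a plain "m" input plus a
  "memory" clobber - assuming x86-64; the sketch_set_bit() name is made up
  for illustration and is not the kernel implementation:

	static inline void sketch_set_bit(long nr, volatile unsigned long *addr)
	{
		/*
		 * 'nr' may index past addr[0], so the asm can modify memory
		 * the compiler cannot see through the "m" operand; the
		 * "memory" clobber declares that, instead of a too-narrow
		 * "+m" output on *addr.
		 */
		asm volatile("btsq %1, %0"
			     : /* no outputs */
			     : "m" (*(volatile long *)addr), "r" (nr)
			     : "memory", "cc");
	}

  For example, sketch_set_bit(65, bits) modifies bits[1], an access that the
  old "+m" (*(volatile long *)addr) operand never described to the compiler. )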

 Thanks,

        Ingo

------------------>
Alexander Potapenko (1):
      x86/asm: Use stricter assembly constraints in bitops

Xiaochen Shen (1):
      x86/resctrl: Fix typos in the mba_sc mount option


 arch/x86/include/asm/bitops.h          | 41 +++++++++++++++-------------------
 arch/x86/kernel/cpu/resctrl/rdtgroup.c |  6 ++---
 2 files changed, 21 insertions(+), 26 deletions(-)

diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index d153d570bb04..8e790ec219a5 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -36,16 +36,17 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 
-#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
+#define RLONG_ADDR(x)                   "m" (*(volatile long *) (x))
+#define WBYTE_ADDR(x)                  "+m" (*(volatile char *) (x))
 
-#define ADDR                           BITOP_ADDR(addr)
+#define ADDR                           RLONG_ADDR(addr)
 
 /*
  * We do the locked ops that don't return the old value as
  * a mask operation on a byte.
  */
 #define IS_IMMEDIATE(nr)               (__builtin_constant_p(nr))
-#define CONST_MASK_ADDR(nr, addr)      BITOP_ADDR((void *)(addr) + ((nr)>>3))
+#define CONST_MASK_ADDR(nr, addr)      WBYTE_ADDR((void *)(addr) + ((nr)>>3))
 #define CONST_MASK(nr)                 (1 << ((nr) & 7))
 
 /**
@@ -73,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
                        : "memory");
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
-                       : BITOP_ADDR(addr) : "Ir" (nr) : "memory");
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -88,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
  */
 static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
+       asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -110,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
                        : "iq" ((u8)~CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -131,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
 
 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@@ -139,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
        bool negative;
        asm volatile(LOCK_PREFIX "andb %2,%1"
                CC_SET(s)
-               : CC_OUT(s) (negative), ADDR
+               : CC_OUT(s) (negative), WBYTE_ADDR(addr)
                : "ir" ((char) ~(1 << nr)) : "memory");
        return negative;
 }
@@ -155,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
  * __clear_bit() is non-atomic and implies release semantics before the memory
  * operation. It can be used for an unlock if no other CPUs can concurrently
  * modify other bits in the word.
- *
- * No memory barrier is required here, because x86 cannot reorder stores past
- * older loads. Same principle as spin_unlock.
  */
 static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
-       barrier();
        __clear_bit(nr, addr);
 }
 
@@ -176,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
  */
 static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
-       asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
+       asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
 }
 
 /**
@@ -196,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
                        : "iq" ((u8)CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
-                       : BITOP_ADDR(addr)
-                       : "Ir" (nr));
+                       : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
 }
 
@@ -242,8 +237,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
 
        asm(__ASM_SIZE(bts) " %2,%1"
            CC_SET(c)
-           : CC_OUT(c) (oldbit), ADDR
-           : "Ir" (nr));
+           : CC_OUT(c) (oldbit)
+           : ADDR, "Ir" (nr) : "memory");
        return oldbit;
 }
 
@@ -282,8 +277,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
 
        asm volatile(__ASM_SIZE(btr) " %2,%1"
                     CC_SET(c)
-                    : CC_OUT(c) (oldbit), ADDR
-                    : "Ir" (nr));
+                    : CC_OUT(c) (oldbit)
+                    : ADDR, "Ir" (nr) : "memory");
        return oldbit;
 }
 
@@ -294,8 +289,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
 
        asm volatile(__ASM_SIZE(btc) " %2,%1"
                     CC_SET(c)
-                    : CC_OUT(c) (oldbit), ADDR
-                    : "Ir" (nr) : "memory");
+                    : CC_OUT(c) (oldbit)
+                    : ADDR, "Ir" (nr) : "memory");
 
        return oldbit;
 }
@@ -326,7 +321,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
        asm volatile(__ASM_SIZE(bt) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit)
-                    : "m" (*(unsigned long *)addr), "Ir" (nr));
+                    : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
 
        return oldbit;
 }
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 399601eda8e4..54b9eef3eea9 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2039,14 +2039,14 @@ static int rdt_get_tree(struct fs_context *fc)
 enum rdt_param {
        Opt_cdp,
        Opt_cdpl2,
-       Opt_mba_mpbs,
+       Opt_mba_mbps,
        nr__rdt_params
 };
 
 static const struct fs_parameter_spec rdt_param_specs[] = {
        fsparam_flag("cdp",             Opt_cdp),
        fsparam_flag("cdpl2",           Opt_cdpl2),
-       fsparam_flag("mba_mpbs",        Opt_mba_mpbs),
+       fsparam_flag("mba_MBps",        Opt_mba_mbps),
        {}
 };
 
@@ -2072,7 +2072,7 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
        case Opt_cdpl2:
                ctx->enable_cdpl2 = true;
                return 0;
-       case Opt_mba_mpbs:
+       case Opt_mba_mbps:
                if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                        return -EINVAL;
                ctx->enable_mba_mbps = true;
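
( Side note, not part of the changelog: with the typo fixed, the option
  spelling matches what the resctrl documentation describes for the mount
  command line, i.e. something along the lines of
  "mount -t resctrl resctrl -o mba_MBps /sys/fs/resctrl". )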
