The following commit has been merged into the x86/entry branch of tip:

Commit-ID:     c74809a9760f0d0c80ebe4e6ddcc9aebba9d90bc
Gitweb:        https://git.kernel.org/tip/c74809a9760f0d0c80ebe4e6ddcc9aebba9d90bc
Author:        Peter Zijlstra <pet...@infradead.org>
AuthorDate:    Wed, 03 Jun 2020 13:40:19 +02:00
Committer:     Thomas Gleixner <t...@linutronix.de>
CommitterDate: Wed, 03 Jun 2020 16:35:37 +02:00

x86/entry: __always_inline arch_atomic_* for noinstr

vmlinux.o: warning: objtool: rcu_dynticks_eqs_exit()+0x33: call to arch_atomic_and.constprop.0() leaves .noinstr.text section

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Link: https://lkml.kernel.org/r/20200603114052.070166...@infradead.org

---
 arch/x86/include/asm/atomic.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index a9ae588..bf35e47 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -205,13 +205,13 @@ static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int n
 }
 #define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
 
-static inline int arch_atomic_xchg(atomic_t *v, int new)
+static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
 {
        return arch_xchg(&v->counter, new);
 }
 #define arch_atomic_xchg arch_atomic_xchg
 
-static inline void arch_atomic_and(int i, atomic_t *v)
+static __always_inline void arch_atomic_and(int i, atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "andl %1,%0"
                        : "+m" (v->counter)
@@ -219,7 +219,7 @@ static inline void arch_atomic_and(int i, atomic_t *v)
                        : "memory");
 }
 
-static inline int arch_atomic_fetch_and(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
 {
        int val = arch_atomic_read(v);
 
@@ -229,7 +229,7 @@ static inline int arch_atomic_fetch_and(int i, atomic_t *v)
 }
 #define arch_atomic_fetch_and arch_atomic_fetch_and
 
-static inline void arch_atomic_or(int i, atomic_t *v)
+static __always_inline void arch_atomic_or(int i, atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "orl %1,%0"
                        : "+m" (v->counter)
@@ -237,7 +237,7 @@ static inline void arch_atomic_or(int i, atomic_t *v)
                        : "memory");
 }
 
-static inline int arch_atomic_fetch_or(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
 {
        int val = arch_atomic_read(v);
 
@@ -247,7 +247,7 @@ static inline int arch_atomic_fetch_or(int i, atomic_t *v)
 }
 #define arch_atomic_fetch_or arch_atomic_fetch_or
 
-static inline void arch_atomic_xor(int i, atomic_t *v)
+static __always_inline void arch_atomic_xor(int i, atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "xorl %1,%0"
                        : "+m" (v->counter)
@@ -255,7 +255,7 @@ static inline void arch_atomic_xor(int i, atomic_t *v)
                        : "memory");
 }
 
-static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 {
        int val = arch_atomic_read(v);
 

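For readers wondering why s/inline/__always_inline/ matters here: plain "static inline" is only a hint, so the compiler remains free to emit an out-of-line copy of a helper (the arch_atomic_and.constprop.0() clone in the warning above) in ordinary .text. A call from .noinstr.text into such a copy crosses the section boundary, and objtool flags it because noinstr code must never reach instrumentable text. __always_inline forces the helper's body into its caller, keeping it inside .noinstr.text. The following is a minimal user-space sketch of that mechanism, not kernel code; the noinstr macro, the section name, and my_atomic_and() are simplified stand-ins for the kernel's definitions.

/* sketch.c - build with: gcc -O2 -o sketch sketch.c */
#include <stdio.h>

/* Stand-in for the kernel's noinstr annotation: pin the function
 * into a dedicated section, analogous to .noinstr.text. */
#define noinstr __attribute__((noinline, section(".noinstr.text")))

/* With plain "static inline", gcc may materialize an out-of-line
 * .constprop clone of this helper in .text; always_inline
 * guarantees the body is inlined into every caller instead. */
static inline __attribute__((always_inline))
void my_atomic_and(int i, int *v)
{
        /* locked read-modify-write AND, mirroring arch_atomic_and() */
        __atomic_fetch_and(v, i, __ATOMIC_SEQ_CST);
}

static int counter = 0xff;

/* Analogous to rcu_dynticks_eqs_exit(): noinstr code calling an
 * atomic helper. Because the helper is always-inlined, its body
 * lands inside .noinstr.text together with the caller. */
static noinstr void eqs_exit_sketch(void)
{
        my_atomic_and(0x0f, &counter);
}

int main(void)
{
        eqs_exit_sketch();
        printf("counter = %#x\n", counter);     /* prints 0xf */
        return 0;
}

Disassembling with objdump -d -j .noinstr.text sketch shows the lock-prefixed AND inlined into eqs_exit_sketch(); with the always_inline attribute dropped, the compiler may (depending on flags and callers) emit the helper out of line in .text instead, recreating the cross-section call pattern the warning complains about.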