Use wrapping_add() to annotate the additions in atomic_add_return(),
atomic64_add_return(), and atomic64_fetch_add() as expecting to wrap
around.

Signed-off-by: Kees Cook <keesc...@chromium.org>
---
Cc: Will Deacon <w...@kernel.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Boqun Feng <boqun.f...@gmail.com>
Cc: Mark Rutland <mark.rutl...@arm.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: x...@kernel.org
Cc: "H. Peter Anvin" <h...@zytor.com>
---
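For reviewers' reference: wrapping_add() comes from <linux/overflow.h>.
A minimal sketch of the helper, paraphrased here for convenience and
assuming the overflow.h changes earlier in this series are in place
(see that header for the authoritative definition):

	#define wrapping_add(type, a, b)			\
		({						\
			type __val;				\
			__builtin_add_overflow(a, b, &__val);	\
			__val;					\
		})

__builtin_add_overflow() always stores the wrapped (two's-complement)
result and reports the overflow through its return value; ignoring that
return value marks the wrap-around as intentional, so the signed
integer overflow sanitizer does not instrument the addition.
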
 arch/x86/include/asm/atomic.h      | 3 ++-
 arch/x86/include/asm/atomic64_32.h | 2 +-
 arch/x86/include/asm/atomic64_64.h | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 55a55ec04350..a5862a258760 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -3,6 +3,7 @@
 #define _ASM_X86_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <linux/overflow.h>
 #include <linux/types.h>
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
@@ -82,7 +83,7 @@ static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 
 static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
-       return i + xadd(&v->counter, i);
+       return wrapping_add(int, i, xadd(&v->counter, i));
 }
 #define arch_atomic_add_return arch_atomic_add_return
 
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 3486d91b8595..608b100e8ffe 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -254,7 +254,7 @@ static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
        s64 old, c = 0;
 
-       while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c)
+       while ((old = arch_atomic64_cmpxchg(v, c, wrapping_add(s64, c, i))) != c)
                c = old;
 
        return old;
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 3165c0feedf7..f1dc8aa54b52 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -76,7 +76,7 @@ static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
 
 static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
-       return i + xadd(&v->counter, i);
+       return wrapping_add(s64, i, xadd(&v->counter, i));
 }
 #define arch_atomic64_add_return arch_atomic64_add_return
 
-- 
2.34.1

