Annotate atomic_add_return() and atomic_sub_return() to avoid signed
overflow instrumentation. They are expected to wrap around.
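
For reference, the wrapping_add()/wrapping_sub() helpers used here come
from <linux/overflow.h> and are built on the compiler's overflow
builtins. A minimal sketch (illustrative only, not necessarily the exact
in-tree definitions) looks roughly like:

	/*
	 * Perform a wrapping (modular) addition/subtraction of 'a' and 'b'
	 * in the given type. __builtin_*_overflow() has well-defined
	 * wrap-around semantics, so the signed-overflow sanitizer does not
	 * instrument the operation.
	 */
	#define wrapping_add(type, a, b)			\
		({						\
			type __val;				\
			__builtin_add_overflow(a, b, &__val);	\
			__val;					\
		})

	#define wrapping_sub(type, a, b)			\
		({						\
			type __val;				\
			__builtin_sub_overflow(a, b, &__val);	\
			__val;					\
		})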

Signed-off-by: Kees Cook <keesc...@chromium.org>
---
Cc: Will Deacon <w...@kernel.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Boqun Feng <boqun.f...@gmail.com>
Cc: Mark Rutland <mark.rutl...@arm.com>
Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: linux-arm-ker...@lists.infradead.org
---
 arch/arm64/include/asm/atomic_lse.h | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 87f568a94e55..a33576b20b52 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -10,6 +10,8 @@
 #ifndef __ASM_ATOMIC_LSE_H
 #define __ASM_ATOMIC_LSE_H
 
+#include <linux/overflow.h>
+
 #define ATOMIC_OP(op, asm_op)                                          \
 static __always_inline void                                            \
 __lse_atomic_##op(int i, atomic_t *v)                                  \
@@ -82,13 +84,13 @@ ATOMIC_FETCH_OP_SUB(        )
 static __always_inline int                                             \
 __lse_atomic_add_return##name(int i, atomic_t *v)                      \
 {                                                                      \
-       return __lse_atomic_fetch_add##name(i, v) + i;                  \
+       return wrapping_add(int, __lse_atomic_fetch_add##name(i, v), i);\
 }                                                                      \
                                                                        \
 static __always_inline int                                             \
 __lse_atomic_sub_return##name(int i, atomic_t *v)                      \
 {                                                                      \
-       return __lse_atomic_fetch_sub(i, v) - i;                        \
+       return wrapping_sub(int, __lse_atomic_fetch_sub(i, v), i);      \
 }
 
 ATOMIC_OP_ADD_SUB_RETURN(_relaxed)
@@ -189,13 +191,13 @@ ATOMIC64_FETCH_OP_SUB(        )
 static __always_inline long                                            \
 __lse_atomic64_add_return##name(s64 i, atomic64_t *v)                  \
 {                                                                      \
-       return __lse_atomic64_fetch_add##name(i, v) + i;                \
+       return wrapping_add(s64, __lse_atomic64_fetch_add##name(i, v), i); \
 }                                                                      \
                                                                        \
 static __always_inline long                                            \
 __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)                  \
 {                                                                      \
-       return __lse_atomic64_fetch_sub##name(i, v) - i;                \
+       return wrapping_sub(s64, __lse_atomic64_fetch_sub##name(i, v), i); \
 }
 
 ATOMIC64_OP_ADD_SUB_RETURN(_relaxed)
-- 
2.34.1

