For some unaligned memory accesses we have to check an additional
byte of the shadow memory. Currently we load that byte speculatively
so that the optimistic fast path needs only a single load + branch.
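
To illustrate, here is a minimal user-space sketch (not kernel code; it
only borrows the KASAN constant names) of the check that decides whether
an access straddles an 8-byte shadow granule and therefore maps onto two
shadow bytes:

/*
 * Illustration only -- user-space, not kernel code. Each shadow byte
 * covers 8 bytes of memory, so an access whose first and last bytes
 * fall into different 8-byte granules needs both shadow bytes checked.
 */
#include <stdbool.h>
#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT	3
#define KASAN_SHADOW_MASK	((1UL << KASAN_SHADOW_SCALE_SHIFT) - 1)

static bool crosses_shadow_granule(unsigned long addr, unsigned long size)
{
	/* Same condition memory_is_poisoned_2_4_8() uses below. */
	return ((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1;
}

int main(void)
{
	/* Aligned 8-byte access: covered by a single shadow byte. */
	printf("%d\n", crosses_shadow_granule(0x1000, 8));	/* 0 */
	/* Unaligned 8-byte access: spans two granules. */
	printf("%d\n", crosses_shadow_granule(0x1005, 8));	/* 1 */
	return 0;
}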

However, this approach has some downsides:
 - The load is unaligned, which prevents porting KASAN to architectures
    that don't support unaligned accesses.
 - We have to map an additional shadow page to avoid a crash if the
    speculative load happens near the end of the mapped memory.
    This would significantly complicate the upcoming memory hotplug support.

I wasn't able to notice any performance degradation with this patch,
so these speculative loads are just a pain with no gain; let's remove
them.

Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com>
---
 mm/kasan/kasan.c | 98 +++++++++-----------------------------------------------
 1 file changed, 16 insertions(+), 82 deletions(-)

diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 85ee45b07615..e6fe07a98677 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -134,94 +134,30 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
        return false;
 }
 
-static __always_inline bool memory_is_poisoned_2(unsigned long addr)
+static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
+                                               unsigned long size)
 {
-       u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-       if (unlikely(*shadow_addr)) {
-               if (memory_is_poisoned_1(addr + 1))
-                       return true;
-
-               /*
-                * If single shadow byte covers 2-byte access, we don't
-                * need to do anything more. Otherwise, test the first
-                * shadow byte.
-                */
-               if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
-                       return false;
-
-               return unlikely(*(u8 *)shadow_addr);
-       }
+       u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
 
-       return false;
-}
-
-static __always_inline bool memory_is_poisoned_4(unsigned long addr)
-{
-       u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-       if (unlikely(*shadow_addr)) {
-               if (memory_is_poisoned_1(addr + 3))
-                       return true;
-
-               /*
-                * If single shadow byte covers 4-byte access, we don't
-                * need to do anything more. Otherwise, test the first
-                * shadow byte.
-                */
-               if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
-                       return false;
-
-               return unlikely(*(u8 *)shadow_addr);
-       }
-
-       return false;
-}
-
-static __always_inline bool memory_is_poisoned_8(unsigned long addr)
-{
-       u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-       if (unlikely(*shadow_addr)) {
-               if (memory_is_poisoned_1(addr + 7))
-                       return true;
-
-               /*
-                * If single shadow byte covers 8-byte access, we don't
-                * need to do anything more. Otherwise, test the first
-                * shadow byte.
-                */
-               if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-                       return false;
-
-               return unlikely(*(u8 *)shadow_addr);
-       }
+       /*
+        * Access crosses 8(shadow size)-byte boundary. Such access maps
+        * into 2 shadow bytes, so we need to check them both.
+        */
+       if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+               return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
 
-       return false;
+       return memory_is_poisoned_1(addr + size - 1);
 }
 
 static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 {
-       u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);
-
-       if (unlikely(*shadow_addr)) {
-               u16 shadow_first_bytes = *(u16 *)shadow_addr;
-
-               if (unlikely(shadow_first_bytes))
-                       return true;
-
-               /*
-                * If two shadow bytes covers 16-byte access, we don't
-                * need to do anything more. Otherwise, test the last
-                * shadow byte.
-                */
-               if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-                       return false;
+       u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
 
-               return memory_is_poisoned_1(addr + 15);
-       }
+       /* Unaligned 16-bytes access maps into 3 shadow bytes. */
+       if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+               return *shadow_addr || memory_is_poisoned_1(addr + 15);
 
-       return false;
+       return *shadow_addr;
 }
 
 static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
@@ -292,11 +228,9 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
-                       return memory_is_poisoned_2(addr);
                case 4:
-                       return memory_is_poisoned_4(addr);
                case 8:
-                       return memory_is_poisoned_8(addr);
+                       return memory_is_poisoned_2_4_8(addr, size);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
-- 
2.13.0
