mm/kasan: get rid of speculative shadow checks

For some unaligned memory accesses we have to check an additional byte of
the shadow memory.  Currently we load that byte speculatively, to have
only a single load + branch on the optimistic fast path.
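
For readers who don't have the KASAN shadow layout in their head, here is a
rough, compile-and-run model of that old fast path.  The shadow[] array, the
model_*() helpers and the toy 512-byte address space are invented for
illustration; they are not the kernel's kasan_mem_to_shadow() /
KASAN_SHADOW_OFFSET machinery, and any nonzero shadow byte is simply treated
as poisoned here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3            /* 8 bytes of memory per shadow byte */

static int8_t shadow[64];               /* covers a toy 512-byte address space */

static int8_t *model_mem_to_shadow(uintptr_t addr)
{
        return &shadow[addr >> SHADOW_SCALE_SHIFT];     /* the kernel adds an offset too */
}

static bool model_is_poisoned_2_old(uintptr_t addr)
{
        /*
         * Old fast path: a single (possibly unaligned, type-punned) u16 load
         * that covers the shadow byte for addr's granule and, speculatively,
         * the byte for the following granule -- even when the 2-byte access
         * never reaches that granule.  This is the load the patch removes.
         */
        uint16_t both = *(uint16_t *)model_mem_to_shadow(addr);

        if (both == 0)
                return false;           /* optimistic case: one load + one branch */

        return true;                    /* stand-in for the precise slow-path check */
}

int main(void)
{
        shadow[1] = -1;                 /* poison toy bytes 8..15 */

        /* 16..17 stays in a clean granule; 7..8 crosses into the poisoned one. */
        printf("%d %d\n", model_is_poisoned_2_old(16), model_is_poisoned_2_old(7));
        return 0;
}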

However, this approach has some downsides:

 - It's an unaligned access, so this prevents porting KASAN to
   architectures which don't support unaligned accesses.

 - We have to map an additional shadow page to prevent a crash if the
   speculative load happens near the end of the mapped memory. This would
   significantly complicate upcoming memory hotplug support.

I wasn't able to notice any performance degradation with this patch.  So
these speculative loads are just a pain with no gain; let's remove them.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrey Ryabinin <[email protected]>
Acked-by: Dmitry Vyukov <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
aryabinin authored and torvalds committed Jul 10, 2017
1 parent 458f792 commit c634d80
Showing 1 changed file with 16 additions and 82 deletions.
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -134,94 +134,30 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
         return false;
 }
 
-static __always_inline bool memory_is_poisoned_2(unsigned long addr)
+static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
+                                                unsigned long size)
 {
-        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-        if (unlikely(*shadow_addr)) {
-                if (memory_is_poisoned_1(addr + 1))
-                        return true;
-
-                /*
-                 * If single shadow byte covers 2-byte access, we don't
-                 * need to do anything more. Otherwise, test the first
-                 * shadow byte.
-                 */
-                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
-                        return false;
-
-                return unlikely(*(u8 *)shadow_addr);
-        }
+        u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
 
-        return false;
-}
-
-static __always_inline bool memory_is_poisoned_4(unsigned long addr)
-{
-        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-        if (unlikely(*shadow_addr)) {
-                if (memory_is_poisoned_1(addr + 3))
-                        return true;
-
-                /*
-                 * If single shadow byte covers 4-byte access, we don't
-                 * need to do anything more. Otherwise, test the first
-                 * shadow byte.
-                 */
-                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
-                        return false;
-
-                return unlikely(*(u8 *)shadow_addr);
-        }
-
-        return false;
-}
-
-static __always_inline bool memory_is_poisoned_8(unsigned long addr)
-{
-        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
-
-        if (unlikely(*shadow_addr)) {
-                if (memory_is_poisoned_1(addr + 7))
-                        return true;
-
-                /*
-                 * If single shadow byte covers 8-byte access, we don't
-                 * need to do anything more. Otherwise, test the first
-                 * shadow byte.
-                 */
-                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-                        return false;
-
-                return unlikely(*(u8 *)shadow_addr);
-        }
+        /*
+         * Access crosses 8(shadow size)-byte boundary. Such access maps
+         * into 2 shadow bytes, so we need to check them both.
+         */
+        if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+                return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
 
-        return false;
+        return memory_is_poisoned_1(addr + size - 1);
 }
 
 static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 {
-        u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);
-
-        if (unlikely(*shadow_addr)) {
-                u16 shadow_first_bytes = *(u16 *)shadow_addr;
-
-                if (unlikely(shadow_first_bytes))
-                        return true;
-
-                /*
-                 * If two shadow bytes covers 16-byte access, we don't
-                 * need to do anything more. Otherwise, test the last
-                 * shadow byte.
-                 */
-                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-                        return false;
+        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
 
-                return memory_is_poisoned_1(addr + 15);
-        }
+        /* Unaligned 16-bytes access maps into 3 shadow bytes. */
+        if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+                return *shadow_addr || memory_is_poisoned_1(addr + 15);
 
-        return false;
+        return *shadow_addr;
 }
 
 static __always_inline unsigned long bytes_is_zero(const u8 *start,
@@ -292,11 +228,9 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
                 case 1:
                         return memory_is_poisoned_1(addr);
                 case 2:
-                        return memory_is_poisoned_2(addr);
                 case 4:
-                        return memory_is_poisoned_4(addr);
                 case 8:
-                        return memory_is_poisoned_8(addr);
+                        return memory_is_poisoned_2_4_8(addr, size);
                 case 16:
                         return memory_is_poisoned_16(addr);
                 default:
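
The new condition is compact, so a worked example may help: for an access of
size bytes the last byte sits at addr + size - 1, and if that byte's offset
within its 8-byte granule is smaller than size - 1, the first byte must lie in
the previous granule, so two shadow bytes have to be consulted.  Below is a
small stand-alone sketch of the same logic (the toy shadow[] array and the
model_*() names are illustrative; real KASAN also encodes partially
addressable granules, which memory_is_poisoned_1() handles and this model
ignores):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3
#define SHADOW_MASK ((1UL << SHADOW_SCALE_SHIFT) - 1)

static int8_t shadow[64];                       /* toy shadow for 512 bytes of memory */

static int8_t *model_mem_to_shadow(uintptr_t addr)
{
        return &shadow[addr >> SHADOW_SCALE_SHIFT];
}

static bool model_is_poisoned_2_4_8(uintptr_t addr, unsigned long size)
{
        int8_t *sa = model_mem_to_shadow(addr);

        /* Crosses an 8-byte granule boundary: check both shadow bytes. */
        if (((addr + size - 1) & SHADOW_MASK) < size - 1)
                return *sa || *model_mem_to_shadow(addr + size - 1);

        /* Stays within one granule: the last byte's shadow byte is enough. */
        return *model_mem_to_shadow(addr + size - 1);
}

int main(void)
{
        shadow[1] = -1;                         /* poison toy bytes 8..15 */

        /* 8-byte access at 4 spans granules 0 and 1: (4 + 7) & 7 == 3 < 7 -> poisoned */
        printf("%d\n", model_is_poisoned_2_4_8(4, 8));
        /* aligned 8-byte access at 16 stays in granule 2: (16 + 7) & 7 == 7 -> clean */
        printf("%d\n", model_is_poisoned_2_4_8(16, 8));
        return 0;
}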
