kcsan: Improve various small stylistic details
Tidy up a few bits:

  - Fix typos and grammar, improve wording.

  - Remove spurious newlines that are col80 warning artifacts where the
    resulting line-break is worse than the disease it's curing.

  - Use core kernel coding style to improve readability and reduce
    spurious code pattern variations.

  - Use better vertical alignment for structure definitions and initialization
    sequences.

  - Misc other small details.

No change in functionality intended.

Cc: [email protected]
Cc: Marco Elver <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Ingo Molnar committed Nov 20, 2019
1 parent 8e1d58a commit 5cbaefe
Showing 15 changed files with 131 additions and 153 deletions.
2 changes: 1 addition & 1 deletion arch/x86/Kconfig
@@ -226,7 +226,7 @@ config X86
select VIRT_TO_BUS
select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS
- select HAVE_ARCH_KCSAN if X86_64
+ select HAVE_ARCH_KCSAN			if X86_64

config INSTRUCTION_DECODER
def_bool y
2 changes: 1 addition & 1 deletion include/linux/compiler-clang.h
@@ -16,7 +16,7 @@
#define KASAN_ABI_VERSION 5

#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
- /* emulate gcc's __SANITIZE_ADDRESS__ flag */
+ /* Emulate GCC's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
2 changes: 1 addition & 1 deletion include/linux/compiler.h
@@ -313,7 +313,7 @@ unsigned long read_word_at_a_time(const void *addr)
#include <linux/kcsan.h>

/*
- * data_race: macro to document that accesses in an expression may conflict with
+ * data_race(): macro to document that accesses in an expression may conflict with
* other concurrent accesses resulting in data races, but the resulting
* behaviour is deemed safe regardless.
*
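For illustration (not part of this commit): data_race() wraps an expression whose racy accesses are intentional and tolerated, so KCSAN does not report them. A minimal sketch, where stats_count is a hypothetical global and any stale or torn read is acceptable:

    /* Sketch only: 'stats_count' is a hypothetical racy counter. */
    static unsigned long stats_count;

    static unsigned long stats_read_racy(void)
    {
    	/*
    	 * May race with concurrent writers; a stale or torn value is
    	 * acceptable here, so annotate the read with data_race() to
    	 * document that and keep KCSAN quiet about it.
    	 */
    	return data_race(stats_count);
    }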
22 changes: 9 additions & 13 deletions include/linux/kcsan-checks.h
@@ -8,17 +8,17 @@
/*
* Access type modifiers.
*/
- #define KCSAN_ACCESS_WRITE 0x1
+ #define KCSAN_ACCESS_WRITE  0x1
#define KCSAN_ACCESS_ATOMIC 0x2

/*
- * __kcsan_*: Always calls into runtime when KCSAN is enabled. This may be used
+ * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
* even in compilation units that selectively disable KCSAN, but must use KCSAN
- * to validate access to an address. Never use these in header files!
+ * to validate access to an address.  Never use these in header files!
*/
#ifdef CONFIG_KCSAN
/**
- * __kcsan_check_access - check generic access for data race
+ * __kcsan_check_access - check generic access for data races
*
* @ptr address of access
* @size size of access
@@ -32,7 +32,7 @@ static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
#endif

/*
- * kcsan_*: Only calls into runtime when the particular compilation unit has
+ * kcsan_*: Only calls into the runtime when the particular compilation unit has
* KCSAN instrumentation enabled. May be used in header files.
*/
#ifdef __SANITIZE_THREAD__
@@ -77,16 +77,12 @@ static inline void kcsan_check_access(const volatile void *ptr, size_t size,
kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)

/*
- * Check for atomic accesses: if atomic access are not ignored, this simply
- * aliases to kcsan_check_access, otherwise becomes a no-op.
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(), otherwise becomes a no-op.
*/
#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
- #define kcsan_check_atomic_read(...) \
- 	do { \
- 	} while (0)
- #define kcsan_check_atomic_write(...) \
- 	do { \
- 	} while (0)
+ #define kcsan_check_atomic_read(...)  do { } while (0)
+ #define kcsan_check_atomic_write(...) do { } while (0)
#else
#define kcsan_check_atomic_read(ptr, size) \
kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
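A brief aside on the do { } while (0) pattern used above (an illustration, not from the commit): the empty loop keeps the no-op variant a single statement that still demands a trailing semicolon, so call sites behave identically whether or not the check is compiled in. Sketch, with a hypothetical CONFIG_FOO_CHECKS switch:

    #ifdef CONFIG_FOO_CHECKS
    #define foo_check(ptr)	real_foo_check(ptr)
    #else
    #define foo_check(ptr)	do { } while (0)	/* still one statement */
    #endif

    static void example(int *addr)
    {
    	/* Expands safely even in an un-braced if/else: */
    	if (addr)
    		foo_check(addr);
    	else
    		return;
    }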
23 changes: 8 additions & 15 deletions include/linux/kcsan.h
@@ -94,21 +94,14 @@ void kcsan_atomic_next(int n);

#else /* CONFIG_KCSAN */

- static inline void kcsan_init(void) { }
-
- static inline void kcsan_disable_current(void) { }
-
- static inline void kcsan_enable_current(void) { }
-
- static inline void kcsan_nestable_atomic_begin(void) { }
-
- static inline void kcsan_nestable_atomic_end(void) { }
-
- static inline void kcsan_flat_atomic_begin(void) { }
-
- static inline void kcsan_flat_atomic_end(void) { }
-
- static inline void kcsan_atomic_next(int n) { }
+ static inline void kcsan_init(void) { }
+ static inline void kcsan_disable_current(void) { }
+ static inline void kcsan_enable_current(void) { }
+ static inline void kcsan_nestable_atomic_begin(void) { }
+ static inline void kcsan_nestable_atomic_end(void) { }
+ static inline void kcsan_flat_atomic_begin(void) { }
+ static inline void kcsan_flat_atomic_end(void) { }
+ static inline void kcsan_atomic_next(int n) { }

#endif /* CONFIG_KCSAN */

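For context (not part of the commit): these empty static inline stubs exist so that callers can use the KCSAN API unconditionally; with CONFIG_KCSAN=n every call compiles away. A sketch, where set_flag_bit() is a hypothetical caller:

    #include <linux/kcsan.h>

    static void set_flag_bit(unsigned long *flags, unsigned long bit)
    {
    	kcsan_nestable_atomic_begin();	/* no-op when CONFIG_KCSAN=n */
    	*flags |= bit;
    	kcsan_nestable_atomic_end();
    }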
8 changes: 4 additions & 4 deletions include/linux/seqlock.h
@@ -48,7 +48,7 @@
*
* As a consequence, we take the following best-effort approach for raw usage
* via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
- * pessimistically mark then next KCSAN_SEQLOCK_REGION_MAX memory accesses as
+ * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as
* atomics; if there is a matching read_seqcount_retry() call, no following
* memory operations are considered atomic. Usage of seqlocks via seqlock_t
* interface is not affected.
@@ -265,7 +265,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
* usual consistency guarantee. It is one wmb cheaper, because we can
* collapse the two back-to-back wmb()s.
*
- * Note that, writes surrounding the barrier should be declared atomic (e.g.
+ * Note that writes surrounding the barrier should be declared atomic (e.g.
* via WRITE_ONCE): a) to ensure the writes become visible to other threads
* atomically, avoiding compiler optimizations; b) to document which writes are
* meant to propagate to the reader critical section. This is necessary because
@@ -465,15 +465,15 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
{
unsigned ret = read_seqcount_begin(&sl->seqcount);

- kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry */
+ kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */
kcsan_flat_atomic_begin();
return ret;
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
/*
- * Assume not nested: read_seqretry may be called multiple times when
+ * Assume not nested: read_seqretry() may be called multiple times when
* completing read critical section.
*/
kcsan_flat_atomic_end();
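For reference (not part of the commit): the read_seqbegin()/read_seqretry() pair that these KCSAN annotations hook into is used in the canonical retry loop. A minimal sketch, with a hypothetical seqlock-protected counter:

    #include <linux/seqlock.h>

    static u64 get_counter(const seqlock_t *lock, const u64 *counter)
    {
    	unsigned seq;
    	u64 val;

    	do {
    		seq = read_seqbegin(lock);	/* KCSAN: following reads treated as atomic */
    		val = *counter;
    	} while (read_seqretry(lock, seq));	/* retry if a writer intervened */

    	return val;
    }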
2 changes: 1 addition & 1 deletion kernel/kcsan/atomic.h
@@ -6,7 +6,7 @@
#include <linux/jiffies.h>

/*
- * Helper that returns true if access to ptr should be considered as an atomic
+ * Helper that returns true if access to @ptr should be considered an atomic
* access, even though it is not explicitly atomic.
*
* List all volatile globals that have been observed in races, to suppress
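A plausible minimal form of the helper this comment describes (a sketch inferred from the comment, not reproduced from the file; the actual list of globals may differ), assuming jiffies is the one volatile global listed so far:

    static inline bool kcsan_is_atomic(const volatile void *ptr)
    {
    	/* Only jiffies so far; extend as more racy volatile globals are found. */
    	return ptr == &jiffies;
    }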
59 changes: 27 additions & 32 deletions kernel/kcsan/core.c
@@ -19,10 +19,10 @@ bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
- .disable_count = 0,
- .atomic_next = 0,
- .atomic_nest_count = 0,
- .in_flat_atomic = false,
+ .disable_count     = 0,
+ .atomic_next       = 0,
+ .atomic_nest_count = 0,
+ .in_flat_atomic    = false,
};

/*
@@ -50,11 +50,11 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
* slot=9: [10, 11, 9]
* slot=63: [64, 65, 63]
*/
- #define NUM_SLOTS (1 + 2 * KCSAN_CHECK_ADJACENT)
+ #define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
- * SLOT_IDX_FAST is used in fast-path. Not first checking the address's primary
+ * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
* slot (middle) is fine if we assume that data races occur rarely. The set of
* indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
* {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
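As an aside (not from the commit): the slot table in the comment above falls straight out of the SLOT_IDX() arithmetic. A standalone userspace sketch that reproduces it for KCSAN_CHECK_ADJACENT == 1:

    #include <stdio.h>

    #define KCSAN_CHECK_ADJACENT 1
    #define NUM_SLOTS (1 + 2*KCSAN_CHECK_ADJACENT)
    #define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

    int main(void)
    {
    	/* Prints "slot=0: 1 2 0" and "slot=9: 10 11 9", matching the table. */
    	const int slots[] = { 0, 9 };

    	for (int s = 0; s < 2; s++) {
    		printf("slot=%d:", slots[s]);
    		for (int i = 0; i < NUM_SLOTS; i++)
    			printf(" %d", SLOT_IDX(slots[s], i));
    		printf("\n");
    	}
    	return 0;
    }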
@@ -68,17 +68,18 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
* zero-initialized state matches INVALID_WATCHPOINT.
*
* Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
- * use more complicated SLOT_IDX_FAST calculation with modulo in fast-path.
+ * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
*/
- static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS - 1];
+ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
* Instructions to skip watching counter, used in should_watch(). We use a
* per-CPU counter to avoid excessive contention.
*/
static DEFINE_PER_CPU(long, kcsan_skip);

- static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size,
+ static inline atomic_long_t *find_watchpoint(unsigned long addr,
+                                              size_t size,
bool expect_write,
long *encoded_watchpoint)
{
@@ -110,8 +111,8 @@ static inline atomic_long_t *find_watchpoint(unsigned long addr, size_t size,
return NULL;
}

- static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,
-                                                bool is_write)
+ static inline atomic_long_t *
+ insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{
const int slot = watchpoint_slot(addr);
const long encoded_watchpoint = encode_watchpoint(addr, size, is_write);
@@ -120,21 +121,16 @@ static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,

/* Check slot index logic, ensuring we stay within array bounds. */
BUILD_BUG_ON(SLOT_IDX(0, 0) != KCSAN_CHECK_ADJACENT);
- BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT + 1) != 0);
- BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1,
-              KCSAN_CHECK_ADJACENT) !=
-      ARRAY_SIZE(watchpoints) - 1);
- BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS - 1,
-              KCSAN_CHECK_ADJACENT + 1) !=
-      ARRAY_SIZE(watchpoints) - NUM_SLOTS);
+ BUILD_BUG_ON(SLOT_IDX(0, KCSAN_CHECK_ADJACENT+1) != 0);
+ BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT) != ARRAY_SIZE(watchpoints)-1);
+ BUILD_BUG_ON(SLOT_IDX(CONFIG_KCSAN_NUM_WATCHPOINTS-1, KCSAN_CHECK_ADJACENT+1) != ARRAY_SIZE(watchpoints) - NUM_SLOTS);

for (i = 0; i < NUM_SLOTS; ++i) {
long expect_val = INVALID_WATCHPOINT;

/* Try to acquire this slot. */
watchpoint = &watchpoints[SLOT_IDX(slot, i)];
- if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val,
-                                     encoded_watchpoint))
+ if (atomic_long_try_cmpxchg_relaxed(watchpoint, &expect_val, encoded_watchpoint))
return watchpoint;
}

@@ -150,26 +146,24 @@ static inline atomic_long_t *insert_watchpoint(unsigned long addr, size_t size,
* 2. the thread that set up the watchpoint already removed it;
* 3. the watchpoint was removed and then re-used.
*/
- static inline bool try_consume_watchpoint(atomic_long_t *watchpoint,
-                                           long encoded_watchpoint)
+ static inline bool
+ try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{
- return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint,
-                                        CONSUMED_WATCHPOINT);
+ return atomic_long_try_cmpxchg_relaxed(watchpoint, &encoded_watchpoint, CONSUMED_WATCHPOINT);
}

/*
* Return true if watchpoint was not touched, false if consumed.
*/
static inline bool remove_watchpoint(atomic_long_t *watchpoint)
{
- return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) !=
-        CONSUMED_WATCHPOINT;
+ return atomic_long_xchg_relaxed(watchpoint, INVALID_WATCHPOINT) != CONSUMED_WATCHPOINT;
}

static inline struct kcsan_ctx *get_ctx(void)
{
/*
- * In interrupt, use raw_cpu_ptr to avoid unnecessary checks, that would
+ * In interrupts, use raw_cpu_ptr to avoid unnecessary checks, that would
* also result in calls that generate warnings in uaccess regions.
*/
return in_task() ? &current->kcsan_ctx : raw_cpu_ptr(&kcsan_cpu_ctx);
@@ -260,7 +254,8 @@ static inline unsigned int get_delay(void)
*/

static noinline void kcsan_found_watchpoint(const volatile void *ptr,
-                                             size_t size, bool is_write,
+                                             size_t size,
+                                             bool is_write,
atomic_long_t *watchpoint,
long encoded_watchpoint)
{
@@ -296,8 +291,8 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
user_access_restore(flags);
}

- static noinline void kcsan_setup_watchpoint(const volatile void *ptr,
-                                             size_t size, bool is_write)
+ static noinline void
+ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, bool is_write)
{
atomic_long_t *watchpoint;
union {
@@ -346,8 +341,8 @@ static noinline void kcsan_setup_watchpoint(const volatile void *ptr,
watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
if (watchpoint == NULL) {
/*
- * Out of capacity: the size of `watchpoints`, and the frequency
- * with which `should_watch()` returns true should be tweaked so
+ * Out of capacity: the size of 'watchpoints', and the frequency
+ * with which should_watch() returns true should be tweaked so
* that this case happens very rarely.
*/
kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
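For context (not part of the commit): the watchpoint lifecycle shown above rests on a single compare-and-exchange idiom, used both to claim a free slot (insert_watchpoint()) and to consume a matching one (try_consume_watchpoint()). The idiom in isolation, as a sketch:

    /* Sketch: atomically move a slot from 'from' to 'to', failing if it changed. */
    static bool transition(atomic_long_t *slot, long from, long to)
    {
    	long expected = from;

    	/*
    	 * Succeeds only if *slot still holds 'from'. A concurrent
    	 * transition by another CPU makes this fail, which the callers
    	 * above treat as "someone else got there first".
    	 */
    	return atomic_long_try_cmpxchg_relaxed(slot, &expected, to);
    }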
(The remaining 7 changed files are not shown here.)
